reth_config/config.rs

//! Configuration files.
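//!
//! The configuration is stored on disk as TOML. A minimal sketch of the file layout, using only
//! keys that appear in this module (see the individual `*Config` types for all fields and
//! defaults):
//!
//! ```toml
//! [stages.headers]
//! downloader_request_limit = 1000
//! commit_threshold = 10000
//!
//! [prune]
//! block_interval = 5
//!
//! [prune.segments]
//! sender_recovery = { distance = 16384 }
//! ```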
use reth_network_types::{PeersConfig, SessionsConfig};
use reth_prune_types::PruneModes;
use reth_stages_types::ExecutionStageThresholds;
use std::{
    path::{Path, PathBuf},
    time::Duration,
};

#[cfg(feature = "serde")]
const EXTENSION: &str = "toml";

/// The default prune block interval.
pub const DEFAULT_BLOCK_INTERVAL: usize = 5;

/// Configuration for the reth node.
#[derive(Debug, Clone, Default, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct Config {
    /// Configuration for each stage in the pipeline.
    // TODO(onbjerg): Can we make this easier to maintain when we add/remove stages?
    pub stages: StageConfig,
    /// Configuration for pruning.
    #[cfg_attr(feature = "serde", serde(skip_serializing_if = "Option::is_none"))]
    pub prune: Option<PruneConfig>,
    /// Configuration for peering.
    pub peers: PeersConfig,
    /// Configuration for peer sessions.
    pub sessions: SessionsConfig,
}

impl Config {
    /// Sets the pruning configuration.
    pub fn update_prune_config(&mut self, prune_config: PruneConfig) {
        self.prune = Some(prune_config);
    }
}

#[cfg(feature = "serde")]
impl Config {
    /// Load a [`Config`] from a specified path.
    ///
    /// A new configuration file is created with default values if none
    /// exists.
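    ///
    /// # Example
    ///
    /// A minimal sketch, assuming the crate is importable as `reth_config` and built with the
    /// `serde` feature (the file name is illustrative):
    ///
    /// ```ignore
    /// use reth_config::Config;
    ///
    /// // Creates `config.toml` with default values on first use, then loads it.
    /// let config = Config::from_path("config.toml").expect("failed to load config");
    /// assert_eq!(config, Config::default());
    /// ```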
    pub fn from_path(path: impl AsRef<Path>) -> eyre::Result<Self> {
        let path = path.as_ref();
        match std::fs::read_to_string(path) {
            Ok(cfg_string) => {
                toml::from_str(&cfg_string).map_err(|e| eyre::eyre!("Failed to parse TOML: {e}"))
            }
            Err(e) if e.kind() == std::io::ErrorKind::NotFound => {
                if let Some(parent) = path.parent() {
                    std::fs::create_dir_all(parent)
                        .map_err(|e| eyre::eyre!("Failed to create directory: {e}"))?;
                }
                let cfg = Self::default();
                let s = toml::to_string_pretty(&cfg)
                    .map_err(|e| eyre::eyre!("Failed to serialize to TOML: {e}"))?;
                std::fs::write(path, s)
                    .map_err(|e| eyre::eyre!("Failed to write configuration file: {e}"))?;
                Ok(cfg)
            }
            Err(e) => Err(eyre::eyre!("Failed to load configuration: {e}")),
        }
    }

    /// Returns the [`PeersConfig`] for the node.
    ///
    /// If a peers file is provided, the basic nodes from the file are added to the configuration.
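    ///
    /// A minimal sketch (the peers file name is illustrative):
    ///
    /// ```ignore
    /// use std::path::Path;
    ///
    /// let config = reth_config::Config::default();
    /// // Falls back to the configured peers if the file cannot be read.
    /// let peers =
    ///     config.peers_config_with_basic_nodes_from_file(Some(Path::new("known-peers.json")));
    /// ```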
    pub fn peers_config_with_basic_nodes_from_file(
        &self,
        peers_file: Option<&Path>,
    ) -> PeersConfig {
        self.peers
            .clone()
            .with_basic_nodes_from_file(peers_file)
            .unwrap_or_else(|_| self.peers.clone())
    }

    /// Save the configuration to a TOML file.
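    ///
    /// A minimal sketch; the extension must be `toml`, otherwise an `InvalidInput` error is
    /// returned:
    ///
    /// ```ignore
    /// use std::path::Path;
    ///
    /// let config = reth_config::Config::default();
    /// config.save(Path::new("config.toml")).expect("failed to save config");
    /// ```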
    pub fn save(&self, path: &Path) -> Result<(), std::io::Error> {
        if path.extension() != Some(std::ffi::OsStr::new(EXTENSION)) {
            return Err(std::io::Error::new(
                std::io::ErrorKind::InvalidInput,
                format!("reth config file extension must be '{EXTENSION}'"),
            ));
        }

        std::fs::write(
            path,
            toml::to_string(self)
                .map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidData, e.to_string()))?,
        )
    }
}

/// Configuration for each stage in the pipeline.
#[derive(Debug, Clone, Default, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct StageConfig {
    /// Header stage configuration.
    pub headers: HeadersConfig,
    /// Body stage configuration.
    pub bodies: BodiesConfig,
    /// Sender Recovery stage configuration.
    pub sender_recovery: SenderRecoveryConfig,
    /// Execution stage configuration.
    pub execution: ExecutionConfig,
    /// Prune stage configuration.
    pub prune: PruneStageConfig,
    /// Account Hashing stage configuration.
    pub account_hashing: HashingConfig,
    /// Storage Hashing stage configuration.
    pub storage_hashing: HashingConfig,
    /// Merkle stage configuration.
    pub merkle: MerkleConfig,
    /// Transaction Lookup stage configuration.
    pub transaction_lookup: TransactionLookupConfig,
    /// Index Account History stage configuration.
    pub index_account_history: IndexHistoryConfig,
    /// Index Storage History stage configuration.
    pub index_storage_history: IndexHistoryConfig,
    /// Common ETL related configuration.
    pub etl: EtlConfig,
}

impl StageConfig {
    /// The highest threshold (in number of blocks) for switching between incremental and full
    /// calculations across `MerkleStage`, `AccountHashingStage` and `StorageHashingStage`. This is
    /// required to determine whether changesets can be pruned on subsequent pipeline runs during
    /// `ExecutionStage`.
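    ///
    /// With the default configuration this is `max(5_000, 500_000, 500_000) = 500_000`, i.e. the
    /// hashing stages' `clean_threshold` dominates.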
    pub fn execution_external_clean_threshold(&self) -> u64 {
        self.merkle
            .clean_threshold
            .max(self.account_hashing.clean_threshold)
            .max(self.storage_hashing.clean_threshold)
    }
}

/// Header stage configuration.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct HeadersConfig {
    /// The maximum number of requests to send concurrently.
    ///
    /// Default: 100
    pub downloader_max_concurrent_requests: usize,
    /// The minimum number of requests to send concurrently.
    ///
    /// Default: 5
    pub downloader_min_concurrent_requests: usize,
    /// Maximum number of responses to buffer internally.
    /// Each response may contain multiple headers.
    ///
    /// Default: 100
    pub downloader_max_buffered_responses: usize,
    /// The maximum number of headers to request from a peer at a time.
    ///
    /// Default: 1000
    pub downloader_request_limit: u64,
    /// The maximum number of headers to download before committing progress to the database.
    ///
    /// Default: 10000
    pub commit_threshold: u64,
}

impl Default for HeadersConfig {
    fn default() -> Self {
        Self {
            commit_threshold: 10_000,
            downloader_request_limit: 1_000,
            downloader_max_concurrent_requests: 100,
            downloader_min_concurrent_requests: 5,
            downloader_max_buffered_responses: 100,
        }
    }
}

/// Body stage configuration.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct BodiesConfig {
    /// The batch size of non-empty blocks per request.
    ///
    /// Default: 200
    pub downloader_request_limit: u64,
    /// The maximum number of block bodies returned at once from the stream.
    ///
    /// Default: `1_000`
    pub downloader_stream_batch_size: usize,
    /// The size of the internal block buffer in bytes.
    ///
    /// Default: 2GB
    pub downloader_max_buffered_blocks_size_bytes: usize,
    /// The minimum number of requests to send concurrently.
    ///
    /// Default: 5
    pub downloader_min_concurrent_requests: usize,
    /// The maximum number of requests to send concurrently.
    /// This is equal to the max number of peers.
    ///
    /// Default: 100
    pub downloader_max_concurrent_requests: usize,
}

impl Default for BodiesConfig {
    fn default() -> Self {
        Self {
            downloader_request_limit: 200,
            downloader_stream_batch_size: 1_000,
            downloader_max_buffered_blocks_size_bytes: 2 * 1024 * 1024 * 1024, // ~2GB
            downloader_min_concurrent_requests: 5,
            downloader_max_concurrent_requests: 100,
        }
    }
}

/// Sender recovery stage configuration.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct SenderRecoveryConfig {
    /// The maximum number of transactions to process before committing progress to the database.
    pub commit_threshold: u64,
}

impl Default for SenderRecoveryConfig {
    fn default() -> Self {
        Self { commit_threshold: 5_000_000 }
    }
}

/// Execution stage configuration.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct ExecutionConfig {
    /// The maximum number of blocks to process before the execution stage commits.
    pub max_blocks: Option<u64>,
    /// The maximum number of state changes to keep in memory before the execution stage commits.
    pub max_changes: Option<u64>,
    /// The maximum cumulative amount of gas to process before the execution stage commits.
    pub max_cumulative_gas: Option<u64>,
    /// The maximum time spent processing blocks before the execution stage commits.
    #[cfg_attr(
        feature = "serde",
        serde(
            serialize_with = "humantime_serde::serialize",
            deserialize_with = "deserialize_duration"
        )
    )]
    pub max_duration: Option<Duration>,
}

impl Default for ExecutionConfig {
    fn default() -> Self {
        Self {
            max_blocks: Some(500_000),
            max_changes: Some(5_000_000),
            // 50k full blocks of 30M gas
            max_cumulative_gas: Some(30_000_000 * 50_000),
            // 10 minutes
            max_duration: Some(Duration::from_secs(10 * 60)),
        }
    }
}

impl From<ExecutionConfig> for ExecutionStageThresholds {
    fn from(config: ExecutionConfig) -> Self {
        Self {
            max_blocks: config.max_blocks,
            max_changes: config.max_changes,
            max_cumulative_gas: config.max_cumulative_gas,
            max_duration: config.max_duration,
        }
    }
}

/// Prune stage configuration.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct PruneStageConfig {
    /// The maximum number of entries to prune before committing progress to the database.
    pub commit_threshold: usize,
}

impl Default for PruneStageConfig {
    fn default() -> Self {
        Self { commit_threshold: 1_000_000 }
    }
}

/// Hashing stage configuration.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct HashingConfig {
    /// The threshold (in number of blocks) for switching between
    /// incremental hashing and full hashing.
    pub clean_threshold: u64,
    /// The maximum number of entities to process before committing progress to the database.
    pub commit_threshold: u64,
}

impl Default for HashingConfig {
    fn default() -> Self {
        Self { clean_threshold: 500_000, commit_threshold: 100_000 }
    }
}

/// Merkle stage configuration.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct MerkleConfig {
    /// The threshold (in number of blocks) for switching from incremental trie building to a
    /// full rebuild.
    pub clean_threshold: u64,
}

impl Default for MerkleConfig {
    fn default() -> Self {
        Self { clean_threshold: 5_000 }
    }
}

/// Transaction Lookup stage configuration.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct TransactionLookupConfig {
    /// The maximum number of transactions to process before writing to disk.
    pub chunk_size: u64,
}

impl Default for TransactionLookupConfig {
    fn default() -> Self {
        Self { chunk_size: 5_000_000 }
    }
}

/// Common ETL related configuration.
#[derive(Debug, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct EtlConfig {
    /// Data directory where temporary files are created.
    pub dir: Option<PathBuf>,
    /// The maximum size in bytes of data held in memory before being flushed to disk as a file.
    pub file_size: usize,
}

impl Default for EtlConfig {
    fn default() -> Self {
        Self { dir: None, file_size: Self::default_file_size() }
    }
}

impl EtlConfig {
    /// Creates an ETL configuration.
    pub const fn new(dir: Option<PathBuf>, file_size: usize) -> Self {
        Self { dir, file_size }
    }

    /// Returns the default ETL directory from the datadir path.
    pub fn from_datadir(path: &Path) -> PathBuf {
        path.join("etl-tmp")
    }

    /// Default size in bytes of data held in memory before being flushed to disk as a file.
    pub const fn default_file_size() -> usize {
        // 500 MB
        500 * (1024 * 1024)
    }
}

/// History stage configuration.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct IndexHistoryConfig {
    /// The maximum number of blocks to process before committing progress to the database.
    pub commit_threshold: u64,
}

impl Default for IndexHistoryConfig {
    fn default() -> Self {
        Self { commit_threshold: 100_000 }
    }
}

/// Pruning configuration.
#[derive(Debug, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct PruneConfig {
    /// Minimum pruning interval measured in blocks.
    pub block_interval: usize,
    /// Pruning configuration for every part of the data that can be pruned.
    #[cfg_attr(feature = "serde", serde(alias = "parts"))]
    pub segments: PruneModes,
}

impl Default for PruneConfig {
    fn default() -> Self {
        Self { block_interval: DEFAULT_BLOCK_INTERVAL, segments: PruneModes::none() }
    }
}

impl PruneConfig {
    /// Returns whether there is any kind of receipt pruning configuration.
    pub fn has_receipts_pruning(&self) -> bool {
        self.segments.receipts.is_some() || !self.segments.receipts_log_filter.is_empty()
    }

    /// Merges another `PruneConfig` into this one, taking values from the other config if and only
    /// if the corresponding value in this config is not set.
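    ///
    /// A minimal sketch of the precedence (assuming the crate is importable as `reth_config`):
    ///
    /// ```ignore
    /// use reth_config::PruneConfig;
    ///
    /// let mut config = PruneConfig::default();
    /// let other = PruneConfig { block_interval: 10, ..Default::default() };
    /// // `config.block_interval` is still the default, so the value from `other` wins.
    /// config.merge(Some(other));
    /// assert_eq!(config.block_interval, 10);
    /// ```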
    pub fn merge(&mut self, other: Option<Self>) {
        let Some(other) = other else { return };
        let Self {
            block_interval,
            segments:
                PruneModes {
                    sender_recovery,
                    transaction_lookup,
                    receipts,
                    account_history,
                    storage_history,
                    receipts_log_filter,
                },
        } = other;

        // Merge block_interval, only update if it's the default interval
        if self.block_interval == DEFAULT_BLOCK_INTERVAL {
            self.block_interval = block_interval;
        }

        // Merge the various segment prune modes
        self.segments.sender_recovery = self.segments.sender_recovery.or(sender_recovery);
        self.segments.transaction_lookup = self.segments.transaction_lookup.or(transaction_lookup);
        self.segments.receipts = self.segments.receipts.or(receipts);
        self.segments.account_history = self.segments.account_history.or(account_history);
        self.segments.storage_history = self.segments.storage_history.or(storage_history);

        if self.segments.receipts_log_filter.0.is_empty() && !receipts_log_filter.0.is_empty() {
            self.segments.receipts_log_filter = receipts_log_filter;
        }
    }
}

/// Helper to support deserializing a [`Duration`] from older configuration file formats.
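/// Accepts both the humantime string format and the legacy struct format, e.g.
/// `max_duration = '10m'` or `max_duration = { secs = 600, nanos = 0 }`.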
#[cfg(feature = "serde")]
fn deserialize_duration<'de, D>(deserializer: D) -> Result<Option<Duration>, D::Error>
where
    D: serde::de::Deserializer<'de>,
{
    #[derive(serde::Deserialize)]
    #[serde(untagged)]
    enum AnyDuration {
        #[serde(deserialize_with = "humantime_serde::deserialize")]
        Human(Option<Duration>),
        Duration(Option<Duration>),
    }

    <AnyDuration as serde::Deserialize>::deserialize(deserializer).map(|d| match d {
        AnyDuration::Human(duration) | AnyDuration::Duration(duration) => duration,
    })
}

#[cfg(all(test, feature = "serde"))]
mod tests {
    use super::{Config, EXTENSION};
    use crate::PruneConfig;
    use alloy_primitives::Address;
    use reth_network_peers::TrustedPeer;
    use reth_prune_types::{PruneMode, PruneModes, ReceiptsLogPruneConfig};
    use std::{collections::BTreeMap, path::Path, str::FromStr, time::Duration};

    fn with_tempdir(filename: &str, proc: fn(&std::path::Path)) {
        let temp_dir = tempfile::tempdir().unwrap();
        let config_path = temp_dir.path().join(filename).with_extension(EXTENSION);

        proc(&config_path);

        temp_dir.close().unwrap()
    }

    /// Run a test function with a temporary config path as fixture.
    fn with_config_path(test_fn: fn(&Path)) {
        // Create a temporary directory for the config file
        let config_dir = tempfile::tempdir().expect("creating test fixture failed");
        // Create the config file path
        let config_path =
            config_dir.path().join("example-app").join("example-config").with_extension("toml");
        // Run the test function with the config path
        test_fn(&config_path);
        config_dir.close().expect("removing test fixture failed");
    }

    #[test]
    fn test_load_path_works() {
        with_config_path(|path| {
            let config = Config::from_path(path).expect("load_path failed");
            assert_eq!(config, Config::default());
        })
    }

    #[test]
    fn test_load_path_reads_existing_config() {
        with_config_path(|path| {
            let config = Config::default();

            // Create the parent directory if it doesn't exist
            if let Some(parent) = path.parent() {
                std::fs::create_dir_all(parent).expect("Failed to create directories");
            }

            // Write the config to the file
            std::fs::write(path, toml::to_string(&config).unwrap())
                .expect("Failed to write config");

            // Load the config from the file and compare it
            let loaded = Config::from_path(path).expect("load_path failed");
            assert_eq!(config, loaded);
        })
    }

    #[test]
    fn test_load_path_fails_on_invalid_toml() {
        with_config_path(|path| {
            let invalid_toml = "invalid toml data";

            // Create the parent directory if it doesn't exist
            if let Some(parent) = path.parent() {
                std::fs::create_dir_all(parent).expect("Failed to create directories");
            }

            // Write invalid TOML data to the file
            std::fs::write(path, invalid_toml).expect("Failed to write invalid TOML");

            // Attempting to load the config should fail
            let result = Config::from_path(path);
            assert!(result.is_err());
        })
    }

    #[test]
    fn test_load_path_creates_directory_if_not_exists() {
        with_config_path(|path| {
            // Ensure the directory does not exist
            let parent = path.parent().unwrap();
            assert!(!parent.exists());

            // Load the configuration, which should create the directory and a default config file
            let config = Config::from_path(path).expect("load_path failed");
            assert_eq!(config, Config::default());

            // The directory and file should now exist
            assert!(parent.exists());
            assert!(path.exists());
        });
    }

    #[test]
    fn test_store_config() {
        with_tempdir("config-store-test", |config_path| {
            let config = Config::default();
            std::fs::write(
                config_path,
                toml::to_string(&config).expect("Failed to serialize config"),
            )
            .expect("Failed to write config file");
        })
    }

    #[test]
    fn test_store_config_method() {
        with_tempdir("config-store-test-method", |config_path| {
            let config = Config::default();
            config.save(config_path).expect("Failed to store config");
        })
    }

    #[test]
    fn test_load_config() {
        with_tempdir("config-load-test", |config_path| {
            let config = Config::default();

            // Write the config to a file
            std::fs::write(
                config_path,
                toml::to_string(&config).expect("Failed to serialize config"),
            )
            .expect("Failed to write config file");

            // Load the config from the file
            let loaded_config = Config::from_path(config_path).unwrap();

            // Compare the loaded config with the original config
            assert_eq!(config, loaded_config);
        })
    }

    #[test]
    fn test_load_execution_stage() {
        with_tempdir("config-load-test", |config_path| {
            let mut config = Config::default();
            config.stages.execution.max_duration = Some(Duration::from_secs(10 * 60));

            // Write the config to a file
            std::fs::write(
                config_path,
                toml::to_string(&config).expect("Failed to serialize config"),
            )
            .expect("Failed to write config file");

            // Load the config from the file
            let loaded_config = Config::from_path(config_path).unwrap();

            // Compare the loaded config with the original config
            assert_eq!(config, loaded_config);
        })
    }

    // ensures config deserialization is backwards compatible
    #[test]
    fn test_backwards_compatibility() {
        let alpha_0_0_8 = r"#
[stages.headers]
downloader_max_concurrent_requests = 100
downloader_min_concurrent_requests = 5
downloader_max_buffered_responses = 100
downloader_request_limit = 1000
commit_threshold = 10000

[stages.bodies]
downloader_request_limit = 200
downloader_stream_batch_size = 1000
downloader_max_buffered_blocks_size_bytes = 2147483648
downloader_min_concurrent_requests = 5
downloader_max_concurrent_requests = 100

[stages.sender_recovery]
commit_threshold = 5000000

[stages.execution]
max_blocks = 500000
max_changes = 5000000

[stages.account_hashing]
clean_threshold = 500000
commit_threshold = 100000

[stages.storage_hashing]
clean_threshold = 500000
commit_threshold = 100000

[stages.merkle]
clean_threshold = 50000

[stages.transaction_lookup]
chunk_size = 5000000

[stages.index_account_history]
commit_threshold = 100000

[stages.index_storage_history]
commit_threshold = 100000

[peers]
refill_slots_interval = '1s'
trusted_nodes = []
connect_trusted_nodes_only = false
max_backoff_count = 5
ban_duration = '12h'

[peers.connection_info]
max_outbound = 100
max_inbound = 30

[peers.reputation_weights]
bad_message = -16384
bad_block = -16384
bad_transactions = -16384
already_seen_transactions = 0
timeout = -4096
bad_protocol = -2147483648
failed_to_connect = -25600
dropped = -4096

[peers.backoff_durations]
low = '30s'
medium = '3m'
high = '15m'
max = '1h'

[sessions]
session_command_buffer = 32
session_event_buffer = 260

[sessions.limits]

[sessions.initial_internal_request_timeout]
secs = 20
nanos = 0

[sessions.protocol_breach_request_timeout]
secs = 120
nanos = 0

[prune]
block_interval = 5

[prune.parts]
sender_recovery = { distance = 16384 }
transaction_lookup = 'full'
receipts = { before = 1920000 }
account_history = { distance = 16384 }
storage_history = { distance = 16384 }
[prune.parts.receipts_log_filter]
'0xa0b86991c6218b36c1d19d4a2e9eb0ce3606eb48' = { before = 17000000 }
'0xdac17f958d2ee523a2206206994597c13d831ec7' = { distance = 1000 }
#";
        let _conf: Config = toml::from_str(alpha_0_0_8).unwrap();

        let alpha_0_0_11 = r"#
[prune.segments]
sender_recovery = { distance = 16384 }
transaction_lookup = 'full'
receipts = { before = 1920000 }
account_history = { distance = 16384 }
storage_history = { distance = 16384 }
[prune.segments.receipts_log_filter]
'0xa0b86991c6218b36c1d19d4a2e9eb0ce3606eb48' = { before = 17000000 }
'0xdac17f958d2ee523a2206206994597c13d831ec7' = { distance = 1000 }
#";
        let _conf: Config = toml::from_str(alpha_0_0_11).unwrap();

        let alpha_0_0_18 = r"#
[stages.headers]
downloader_max_concurrent_requests = 100
downloader_min_concurrent_requests = 5
downloader_max_buffered_responses = 100
downloader_request_limit = 1000
commit_threshold = 10000

[stages.total_difficulty]
commit_threshold = 100000

[stages.bodies]
downloader_request_limit = 200
downloader_stream_batch_size = 1000
downloader_max_buffered_blocks_size_bytes = 2147483648
downloader_min_concurrent_requests = 5
downloader_max_concurrent_requests = 100

[stages.sender_recovery]
commit_threshold = 5000000

[stages.execution]
max_blocks = 500000
max_changes = 5000000
max_cumulative_gas = 1500000000000
[stages.execution.max_duration]
secs = 600
nanos = 0

[stages.account_hashing]
clean_threshold = 500000
commit_threshold = 100000

[stages.storage_hashing]
clean_threshold = 500000
commit_threshold = 100000

[stages.merkle]
clean_threshold = 50000

[stages.transaction_lookup]
commit_threshold = 5000000

[stages.index_account_history]
commit_threshold = 100000

[stages.index_storage_history]
commit_threshold = 100000

[peers]
refill_slots_interval = '5s'
trusted_nodes = []
connect_trusted_nodes_only = false
max_backoff_count = 5
ban_duration = '12h'

[peers.connection_info]
max_outbound = 100
max_inbound = 30
max_concurrent_outbound_dials = 10

[peers.reputation_weights]
bad_message = -16384
bad_block = -16384
bad_transactions = -16384
already_seen_transactions = 0
timeout = -4096
bad_protocol = -2147483648
failed_to_connect = -25600
dropped = -4096
bad_announcement = -1024

[peers.backoff_durations]
low = '30s'
medium = '3m'
high = '15m'
max = '1h'

[sessions]
session_command_buffer = 32
session_event_buffer = 260

[sessions.limits]

[sessions.initial_internal_request_timeout]
secs = 20
nanos = 0

[sessions.protocol_breach_request_timeout]
secs = 120
nanos = 0
#";
        let conf: Config = toml::from_str(alpha_0_0_18).unwrap();
        assert_eq!(conf.stages.execution.max_duration, Some(Duration::from_secs(10 * 60)));

        let alpha_0_0_19 = r"#
[stages.headers]
downloader_max_concurrent_requests = 100
downloader_min_concurrent_requests = 5
downloader_max_buffered_responses = 100
downloader_request_limit = 1000
commit_threshold = 10000

[stages.total_difficulty]
commit_threshold = 100000

[stages.bodies]
downloader_request_limit = 200
downloader_stream_batch_size = 1000
downloader_max_buffered_blocks_size_bytes = 2147483648
downloader_min_concurrent_requests = 5
downloader_max_concurrent_requests = 100

[stages.sender_recovery]
commit_threshold = 5000000

[stages.execution]
max_blocks = 500000
max_changes = 5000000
max_cumulative_gas = 1500000000000
max_duration = '10m'

[stages.account_hashing]
clean_threshold = 500000
commit_threshold = 100000

[stages.storage_hashing]
clean_threshold = 500000
commit_threshold = 100000

[stages.merkle]
clean_threshold = 50000

[stages.transaction_lookup]
commit_threshold = 5000000

[stages.index_account_history]
commit_threshold = 100000

[stages.index_storage_history]
commit_threshold = 100000

[peers]
refill_slots_interval = '5s'
trusted_nodes = []
connect_trusted_nodes_only = false
max_backoff_count = 5
ban_duration = '12h'

[peers.connection_info]
max_outbound = 100
max_inbound = 30
max_concurrent_outbound_dials = 10

[peers.reputation_weights]
bad_message = -16384
bad_block = -16384
bad_transactions = -16384
already_seen_transactions = 0
timeout = -4096
bad_protocol = -2147483648
failed_to_connect = -25600
dropped = -4096
bad_announcement = -1024

[peers.backoff_durations]
low = '30s'
medium = '3m'
high = '15m'
max = '1h'

[sessions]
session_command_buffer = 32
session_event_buffer = 260

[sessions.limits]

[sessions.initial_internal_request_timeout]
secs = 20
nanos = 0

[sessions.protocol_breach_request_timeout]
secs = 120
nanos = 0
#";
        let _conf: Config = toml::from_str(alpha_0_0_19).unwrap();
    }

    // ensures prune config deserialization is backwards compatible
    #[test]
    fn test_backwards_compatibility_prune_full() {
        let s = r"#
[prune]
block_interval = 5

[prune.segments]
sender_recovery = { distance = 16384 }
transaction_lookup = 'full'
receipts = { distance = 16384 }
#";
        let _conf: Config = toml::from_str(s).unwrap();

        let s = r"#
[prune]
block_interval = 5

[prune.segments]
sender_recovery = { distance = 16384 }
transaction_lookup = 'full'
receipts = 'full'
#";
        let err = toml::from_str::<Config>(s).unwrap_err().to_string();
        assert!(err.contains("invalid value: string \"full\""), "{}", err);
    }

    #[test]
    fn test_prune_config_merge() {
        let mut config1 = PruneConfig {
            block_interval: 5,
            segments: PruneModes {
                sender_recovery: Some(PruneMode::Full),
                transaction_lookup: None,
                receipts: Some(PruneMode::Distance(1000)),
                account_history: None,
                storage_history: Some(PruneMode::Before(5000)),
                receipts_log_filter: ReceiptsLogPruneConfig(BTreeMap::from([(
                    Address::random(),
                    PruneMode::Full,
                )])),
            },
        };

        let config2 = PruneConfig {
            block_interval: 10,
            segments: PruneModes {
                sender_recovery: Some(PruneMode::Distance(500)),
                transaction_lookup: Some(PruneMode::Full),
                receipts: Some(PruneMode::Full),
                account_history: Some(PruneMode::Distance(2000)),
                storage_history: Some(PruneMode::Distance(3000)),
                receipts_log_filter: ReceiptsLogPruneConfig(BTreeMap::from([
                    (Address::random(), PruneMode::Distance(1000)),
                    (Address::random(), PruneMode::Before(2000)),
                ])),
            },
        };

        let original_filter = config1.segments.receipts_log_filter.clone();
        config1.merge(Some(config2));

        // Check that the configuration has been merged. Any configuration present in config1
        // should not be overwritten by config2
        assert_eq!(config1.block_interval, 10);
        assert_eq!(config1.segments.sender_recovery, Some(PruneMode::Full));
        assert_eq!(config1.segments.transaction_lookup, Some(PruneMode::Full));
        assert_eq!(config1.segments.receipts, Some(PruneMode::Distance(1000)));
        assert_eq!(config1.segments.account_history, Some(PruneMode::Distance(2000)));
        assert_eq!(config1.segments.storage_history, Some(PruneMode::Before(5000)));
        assert_eq!(config1.segments.receipts_log_filter, original_filter);
    }

    #[test]
    fn test_conf_trust_nodes_only() {
        let trusted_nodes_only = r"#
[peers]
trusted_nodes_only = true
#";
        let conf: Config = toml::from_str(trusted_nodes_only).unwrap();
        assert!(conf.peers.trusted_nodes_only);

        let trusted_nodes_only = r"#
[peers]
connect_trusted_nodes_only = true
#";
        let conf: Config = toml::from_str(trusted_nodes_only).unwrap();
        assert!(conf.peers.trusted_nodes_only);
    }

    #[test]
    fn test_can_support_dns_in_trusted_nodes() {
        let reth_toml = r#"
    [peers]
    trusted_nodes = [
        "enode://0401e494dbd0c84c5c0f72adac5985d2f2525e08b68d448958aae218f5ac8198a80d1498e0ebec2ce38b1b18d6750f6e61a56b4614c5a6c6cf0981c39aed47dc@34.159.32.127:30303",
        "enode://e9675164b5e17b9d9edf0cc2bd79e6b6f487200c74d1331c220abb5b8ee80c2eefbf18213989585e9d0960683e819542e11d4eefb5f2b4019e1e49f9fd8fff18@berav2-bootnode.staketab.org:30303"
    ]
    "#;

        let conf: Config = toml::from_str(reth_toml).unwrap();
        assert_eq!(conf.peers.trusted_nodes.len(), 2);

        let expected_enodes = vec![
            "enode://0401e494dbd0c84c5c0f72adac5985d2f2525e08b68d448958aae218f5ac8198a80d1498e0ebec2ce38b1b18d6750f6e61a56b4614c5a6c6cf0981c39aed47dc@34.159.32.127:30303",
            "enode://e9675164b5e17b9d9edf0cc2bd79e6b6f487200c74d1331c220abb5b8ee80c2eefbf18213989585e9d0960683e819542e11d4eefb5f2b4019e1e49f9fd8fff18@berav2-bootnode.staketab.org:30303",
        ];

        for enode in expected_enodes {
            let node = TrustedPeer::from_str(enode).unwrap();
            assert!(conf.peers.trusted_nodes.contains(&node));
        }
    }
}