// reth_config/config.rs

//! Configuration files for the reth node, loaded from and saved as TOML.
2use reth_network_types::{PeersConfig, SessionsConfig};
3use reth_prune_types::PruneModes;
4use reth_stages_types::ExecutionStageThresholds;
5use std::{
6    path::{Path, PathBuf},
7    time::Duration,
8};
9use url::Url;
10
/// File extension (`toml`) required by [`Config::save`] for configuration files.
#[cfg(feature = "serde")]
const EXTENSION: &str = "toml";

/// The default prune block interval.
///
/// Also used by [`PruneConfig::merge`] as a sentinel to detect whether
/// `block_interval` was explicitly overridden.
pub const DEFAULT_BLOCK_INTERVAL: usize = 5;
16
/// Configuration for the reth node.
///
/// This is the top-level type that is (de)serialized to and from the node's TOML
/// configuration file when the `serde` feature is enabled. `serde(default)` means
/// any section missing from the file falls back to its [`Default`] value.
#[derive(Debug, Clone, Default, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct Config {
    /// Configuration for each stage in the pipeline.
    // TODO(onbjerg): Can we make this easier to maintain when we add/remove stages?
    pub stages: StageConfig,
    /// Configuration for pruning.
    ///
    /// `None` means pruning was not configured; the field is then omitted from the
    /// serialized output entirely.
    #[cfg_attr(feature = "serde", serde(skip_serializing_if = "Option::is_none"))]
    pub prune: Option<PruneConfig>,
    /// Configuration for the discovery service.
    pub peers: PeersConfig,
    /// Configuration for peer sessions.
    pub sessions: SessionsConfig,
}
33
34impl Config {
35    /// Sets the pruning configuration.
36    pub fn update_prune_config(&mut self, prune_config: PruneConfig) {
37        self.prune = Some(prune_config);
38    }
39}
40
#[cfg(feature = "serde")]
impl Config {
    /// Load a [`Config`] from a specified path.
    ///
    /// A new configuration file is created with default values if none
    /// exists.
    ///
    /// # Errors
    ///
    /// Returns an error if the file exists but contains invalid TOML, if the parent
    /// directory or the default file cannot be created, or if reading fails for any
    /// reason other than the file being missing.
    pub fn from_path(path: impl AsRef<Path>) -> eyre::Result<Self> {
        let path = path.as_ref();
        match std::fs::read_to_string(path) {
            Ok(cfg_string) => {
                toml::from_str(&cfg_string).map_err(|e| eyre::eyre!("Failed to parse TOML: {e}"))
            }
            // Missing file: write a pretty-printed default config there and return it.
            Err(e) if e.kind() == std::io::ErrorKind::NotFound => {
                if let Some(parent) = path.parent() {
                    std::fs::create_dir_all(parent)
                        .map_err(|e| eyre::eyre!("Failed to create directory: {e}"))?;
                }
                let cfg = Self::default();
                let s = toml::to_string_pretty(&cfg)
                    .map_err(|e| eyre::eyre!("Failed to serialize to TOML: {e}"))?;
                std::fs::write(path, s)
                    .map_err(|e| eyre::eyre!("Failed to write configuration file: {e}"))?;
                Ok(cfg)
            }
            // Any other I/O failure (permissions, etc.) is surfaced to the caller.
            Err(e) => Err(eyre::eyre!("Failed to load configuration: {e}")),
        }
    }

    /// Returns the [`PeersConfig`] for the node.
    ///
    /// If a peers file is provided, the basic nodes from the file are added to the configuration.
    /// If reading or parsing the file fails, the unmodified peers configuration is returned;
    /// the error is intentionally swallowed (best-effort).
    pub fn peers_config_with_basic_nodes_from_file(
        &self,
        peers_file: Option<&Path>,
    ) -> PeersConfig {
        self.peers
            .clone()
            .with_basic_nodes_from_file(peers_file)
            // Fall back to the plain config if the file could not be used.
            .unwrap_or_else(|_| self.peers.clone())
    }

    /// Save the configuration to toml file.
    ///
    /// # Errors
    ///
    /// Returns [`std::io::ErrorKind::InvalidInput`] if `path` does not have the
    /// `toml` extension, [`std::io::ErrorKind::InvalidData`] if serialization fails,
    /// or any error produced while writing the file.
    pub fn save(&self, path: &Path) -> Result<(), std::io::Error> {
        if path.extension() != Some(std::ffi::OsStr::new(EXTENSION)) {
            return Err(std::io::Error::new(
                std::io::ErrorKind::InvalidInput,
                format!("reth config file extension must be '{EXTENSION}'"),
            ));
        }

        std::fs::write(
            path,
            toml::to_string(self)
                .map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidData, e.to_string()))?,
        )
    }
}
98
/// Configuration for each stage in the pipeline.
///
/// With the `serde` feature, any stage section omitted from the configuration file
/// falls back to that stage's [`Default`] values.
#[derive(Debug, Clone, Default, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct StageConfig {
    /// ERA stage configuration.
    pub era: EraConfig,
    /// Header stage configuration.
    pub headers: HeadersConfig,
    /// Body stage configuration.
    pub bodies: BodiesConfig,
    /// Sender Recovery stage configuration.
    pub sender_recovery: SenderRecoveryConfig,
    /// Execution stage configuration.
    pub execution: ExecutionConfig,
    /// Prune stage configuration.
    pub prune: PruneStageConfig,
    /// Account Hashing stage configuration.
    pub account_hashing: HashingConfig,
    /// Storage Hashing stage configuration.
    pub storage_hashing: HashingConfig,
    /// Merkle stage configuration.
    pub merkle: MerkleConfig,
    /// Transaction Lookup stage configuration.
    pub transaction_lookup: TransactionLookupConfig,
    /// Index Account History stage configuration.
    pub index_account_history: IndexHistoryConfig,
    /// Index Storage History stage configuration.
    pub index_storage_history: IndexHistoryConfig,
    /// Common ETL related configuration.
    pub etl: EtlConfig,
}
131
132impl StageConfig {
133    /// The highest threshold (in number of blocks) for switching between incremental and full
134    /// calculations across `MerkleStage`, `AccountHashingStage` and `StorageHashingStage`. This is
135    /// required to figure out if can prune or not changesets on subsequent pipeline runs during
136    /// `ExecutionStage`
137    pub fn execution_external_clean_threshold(&self) -> u64 {
138        self.merkle
139            .incremental_threshold
140            .max(self.account_hashing.clean_threshold)
141            .max(self.storage_hashing.clean_threshold)
142    }
143}
144
/// ERA stage configuration.
#[derive(Debug, Clone, Default, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct EraConfig {
    /// Path to a local directory where ERA1 files are located.
    ///
    /// Conflicts with `url`.
    pub path: Option<PathBuf>,
    /// The base URL of an ERA1 file host to download from.
    ///
    /// Conflicts with `path`.
    pub url: Option<Url>,
    /// Path to a directory where files downloaded from `url` will be stored until processed.
    ///
    /// Required for `url`. Can be derived from the node datadir via
    /// [`EraConfig::with_datadir`].
    pub folder: Option<PathBuf>,
}
163
164impl EraConfig {
165    /// Sets `folder` for temporary downloads as a directory called "era" inside `dir`.
166    pub fn with_datadir(mut self, dir: impl AsRef<Path>) -> Self {
167        self.folder = Some(dir.as_ref().join("era"));
168        self
169    }
170}
171
/// Header stage configuration.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct HeadersConfig {
    /// The maximum number of requests to send concurrently.
    ///
    /// Default: 100
    pub downloader_max_concurrent_requests: usize,
    /// The minimum number of requests to send concurrently.
    ///
    /// Default: 5
    pub downloader_min_concurrent_requests: usize,
    /// Maximum amount of responses to buffer internally.
    /// The response contains multiple headers.
    ///
    /// Default: 100
    pub downloader_max_buffered_responses: usize,
    /// The maximum number of headers to request from a peer at a time.
    ///
    /// Default: 1000
    pub downloader_request_limit: u64,
    /// The maximum number of headers to download before committing progress to the database.
    ///
    /// Default: 10000
    pub commit_threshold: u64,
}
193
194impl Default for HeadersConfig {
195    fn default() -> Self {
196        Self {
197            commit_threshold: 10_000,
198            downloader_request_limit: 1_000,
199            downloader_max_concurrent_requests: 100,
200            downloader_min_concurrent_requests: 5,
201            downloader_max_buffered_responses: 100,
202        }
203    }
204}
205
/// Body stage configuration.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct BodiesConfig {
    /// The batch size of non-empty blocks per one request
    ///
    /// Default: 200
    pub downloader_request_limit: u64,
    /// The maximum number of block bodies returned at once from the stream
    ///
    /// Default: `1_000`
    pub downloader_stream_batch_size: usize,
    /// The size of the internal block buffer in bytes.
    ///
    /// Default: 2GB
    pub downloader_max_buffered_blocks_size_bytes: usize,
    /// The minimum number of requests to send concurrently.
    ///
    /// Default: 5
    pub downloader_min_concurrent_requests: usize,
    /// The maximum number of requests to send concurrently.
    /// This is equal to the max number of peers.
    ///
    /// Default: 100
    pub downloader_max_concurrent_requests: usize,
}
233
234impl Default for BodiesConfig {
235    fn default() -> Self {
236        Self {
237            downloader_request_limit: 200,
238            downloader_stream_batch_size: 1_000,
239            downloader_max_buffered_blocks_size_bytes: 2 * 1024 * 1024 * 1024, // ~2GB
240            downloader_min_concurrent_requests: 5,
241            downloader_max_concurrent_requests: 100,
242        }
243    }
244}
245
/// Sender recovery stage configuration.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct SenderRecoveryConfig {
    /// The maximum number of transactions to process before committing progress to the database.
    ///
    /// Default: `5_000_000`
    pub commit_threshold: u64,
}
254
255impl Default for SenderRecoveryConfig {
256    fn default() -> Self {
257        Self { commit_threshold: 5_000_000 }
258    }
259}
260
/// Execution stage configuration.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct ExecutionConfig {
    /// The maximum number of blocks to process before the execution stage commits.
    ///
    /// Default: `500_000`
    pub max_blocks: Option<u64>,
    /// The maximum number of state changes to keep in memory before the execution stage commits.
    ///
    /// Default: `5_000_000`
    pub max_changes: Option<u64>,
    /// The maximum cumulative amount of gas to process before the execution stage commits.
    ///
    /// Default: 30M gas x 50k blocks
    pub max_cumulative_gas: Option<u64>,
    /// The maximum time spent on blocks processing before the execution stage commits.
    ///
    /// Default: 10 minutes.
    ///
    /// Serialized in human-readable form (e.g. `'10m'`) via `humantime_serde`;
    /// deserialization also accepts the older `{ secs, nanos }` struct form
    /// (see `deserialize_duration`).
    #[cfg_attr(
        feature = "serde",
        serde(
            serialize_with = "humantime_serde::serialize",
            deserialize_with = "deserialize_duration"
        )
    )]
    pub max_duration: Option<Duration>,
}
282
283impl Default for ExecutionConfig {
284    fn default() -> Self {
285        Self {
286            max_blocks: Some(500_000),
287            max_changes: Some(5_000_000),
288            // 50k full blocks of 30M gas
289            max_cumulative_gas: Some(30_000_000 * 50_000),
290            // 10 minutes
291            max_duration: Some(Duration::from_secs(10 * 60)),
292        }
293    }
294}
295
296impl From<ExecutionConfig> for ExecutionStageThresholds {
297    fn from(config: ExecutionConfig) -> Self {
298        Self {
299            max_blocks: config.max_blocks,
300            max_changes: config.max_changes,
301            max_cumulative_gas: config.max_cumulative_gas,
302            max_duration: config.max_duration,
303        }
304    }
305}
306
/// Prune stage configuration.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct PruneStageConfig {
    /// The maximum number of entries to prune before committing progress to the database.
    ///
    /// Default: `1_000_000`
    pub commit_threshold: usize,
}
315
316impl Default for PruneStageConfig {
317    fn default() -> Self {
318        Self { commit_threshold: 1_000_000 }
319    }
320}
321
/// Hashing stage configuration.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct HashingConfig {
    /// The threshold (in number of blocks) for switching between
    /// incremental hashing and full hashing.
    ///
    /// Default: `500_000`
    pub clean_threshold: u64,
    /// The maximum number of entities to process before committing progress to the database.
    ///
    /// Default: `100_000`
    pub commit_threshold: u64,
}
333
334impl Default for HashingConfig {
335    fn default() -> Self {
336        Self { clean_threshold: 500_000, commit_threshold: 100_000 }
337    }
338}
339
/// Merkle stage configuration.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct MerkleConfig {
    /// The number of blocks we will run the incremental root method for when we are catching up on
    /// the merkle stage for a large number of blocks.
    ///
    /// When we are catching up for a large number of blocks, we can only run the incremental root
    /// for a limited number of blocks, otherwise the incremental root method may cause the node to
    /// OOM. This number determines how many blocks in a row we will run the incremental root
    /// method for.
    ///
    /// Default: `7_000`
    pub incremental_threshold: u64,
    /// The threshold (in number of blocks) for switching from incremental trie building of changes
    /// to whole rebuild.
    ///
    /// Default: `100_000`
    pub rebuild_threshold: u64,
}
357
358impl Default for MerkleConfig {
359    fn default() -> Self {
360        Self { incremental_threshold: 7_000, rebuild_threshold: 100_000 }
361    }
362}
363
/// Transaction Lookup stage configuration.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct TransactionLookupConfig {
    /// The maximum number of transactions to process before writing to disk.
    ///
    /// Default: `5_000_000`
    pub chunk_size: u64,
}
372
373impl Default for TransactionLookupConfig {
374    fn default() -> Self {
375        Self { chunk_size: 5_000_000 }
376    }
377}
378
/// Common ETL related configuration.
#[derive(Debug, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct EtlConfig {
    /// Data directory where temporary files are created.
    ///
    /// `None` by default; see [`EtlConfig::from_datadir`] for deriving a directory
    /// from the node's datadir.
    pub dir: Option<PathBuf>,
    /// The maximum size in bytes of data held in memory before being flushed to disk as a file.
    ///
    /// Default: 500 MB (see [`EtlConfig::default_file_size`]).
    pub file_size: usize,
}
389
390impl Default for EtlConfig {
391    fn default() -> Self {
392        Self { dir: None, file_size: Self::default_file_size() }
393    }
394}
395
396impl EtlConfig {
397    /// Creates an ETL configuration
398    pub const fn new(dir: Option<PathBuf>, file_size: usize) -> Self {
399        Self { dir, file_size }
400    }
401
402    /// Return default ETL directory from datadir path.
403    pub fn from_datadir(path: &Path) -> PathBuf {
404        path.join("etl-tmp")
405    }
406
407    /// Default size in bytes of data held in memory before being flushed to disk as a file.
408    pub const fn default_file_size() -> usize {
409        // 500 MB
410        500 * (1024 * 1024)
411    }
412}
413
/// History stage configuration.
///
/// Shared by the account- and storage-history index stages.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct IndexHistoryConfig {
    /// The maximum number of blocks to process before committing progress to the database.
    ///
    /// Default: `100_000`
    pub commit_threshold: u64,
}
422
423impl Default for IndexHistoryConfig {
424    fn default() -> Self {
425        Self { commit_threshold: 100_000 }
426    }
427}
428
/// Pruning configuration.
#[derive(Debug, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct PruneConfig {
    /// Minimum pruning interval measured in blocks.
    ///
    /// Default: [`DEFAULT_BLOCK_INTERVAL`]
    pub block_interval: usize,
    /// Pruning configuration for every part of the data that can be pruned.
    ///
    /// The `parts` alias keeps configuration files written before the `segments`
    /// rename deserializable.
    #[cfg_attr(feature = "serde", serde(alias = "parts"))]
    pub segments: PruneModes,
}
440
441impl Default for PruneConfig {
442    fn default() -> Self {
443        Self { block_interval: DEFAULT_BLOCK_INTERVAL, segments: PruneModes::none() }
444    }
445}
446
impl PruneConfig {
    /// Returns whether there is any kind of receipt pruning configuration.
    ///
    /// True if either a whole-segment receipts prune mode is set or at least one
    /// per-contract log filter is configured.
    pub fn has_receipts_pruning(&self) -> bool {
        self.segments.receipts.is_some() || !self.segments.receipts_log_filter.is_empty()
    }

    /// Merges another `PruneConfig` into this one, taking values from the other config if and only
    /// if the corresponding value in this config is not set.
    pub fn merge(&mut self, other: Option<Self>) {
        let Some(other) = other else { return };
        // Exhaustive destructuring (no `..`): adding a field to `PruneModes` fails to
        // compile here, forcing this merge logic to be kept up to date.
        let Self {
            block_interval,
            segments:
                PruneModes {
                    sender_recovery,
                    transaction_lookup,
                    receipts,
                    account_history,
                    storage_history,
                    bodies_history,
                    receipts_log_filter,
                },
        } = other;

        // Merge block_interval, only update if it's the default interval
        // (i.e. it was never explicitly overridden in `self`).
        if self.block_interval == DEFAULT_BLOCK_INTERVAL {
            self.block_interval = block_interval;
        }

        // Merge the various segment prune modes.
        // `Option::or` keeps `self`'s value whenever it is already `Some`.
        self.segments.sender_recovery = self.segments.sender_recovery.or(sender_recovery);
        self.segments.transaction_lookup = self.segments.transaction_lookup.or(transaction_lookup);
        self.segments.receipts = self.segments.receipts.or(receipts);
        self.segments.account_history = self.segments.account_history.or(account_history);
        self.segments.storage_history = self.segments.storage_history.or(storage_history);
        self.segments.bodies_history = self.segments.bodies_history.or(bodies_history);

        // The log filter map (`.0` is the inner map) is taken wholesale from `other`,
        // but only if `self` has none configured.
        if self.segments.receipts_log_filter.0.is_empty() && !receipts_log_filter.0.is_empty() {
            self.segments.receipts_log_filter = receipts_log_filter;
        }
    }
}
489
/// Helper to support older versions of Duration deserialization.
///
/// Accepts either a human-readable duration string (e.g. `'10m'`) or the legacy
/// `{ secs, nanos }` struct form written by older releases.
#[cfg(feature = "serde")]
fn deserialize_duration<'de, D>(deserializer: D) -> Result<Option<Duration>, D::Error>
where
    D: serde::de::Deserializer<'de>,
{
    /// Untagged: serde tries the human-readable form first, then the struct form.
    #[derive(serde::Deserialize)]
    #[serde(untagged)]
    enum AnyDuration {
        #[serde(deserialize_with = "humantime_serde::deserialize")]
        Human(Option<Duration>),
        Duration(Option<Duration>),
    }

    let parsed = <AnyDuration as serde::Deserialize>::deserialize(deserializer)?;
    Ok(match parsed {
        AnyDuration::Human(duration) => duration,
        AnyDuration::Duration(duration) => duration,
    })
}
508
509#[cfg(all(test, feature = "serde"))]
510mod tests {
511    use super::{Config, EXTENSION};
512    use crate::PruneConfig;
513    use alloy_primitives::Address;
514    use reth_network_peers::TrustedPeer;
515    use reth_prune_types::{PruneMode, PruneModes, ReceiptsLogPruneConfig};
516    use std::{collections::BTreeMap, path::Path, str::FromStr, time::Duration};
517
518    fn with_tempdir(filename: &str, proc: fn(&std::path::Path)) {
519        let temp_dir = tempfile::tempdir().unwrap();
520        let config_path = temp_dir.path().join(filename).with_extension(EXTENSION);
521
522        proc(&config_path);
523
524        temp_dir.close().unwrap()
525    }
526
527    /// Run a test function with a temporary config path as fixture.
528    fn with_config_path(test_fn: fn(&Path)) {
529        // Create a temporary directory for the config file
530        let config_dir = tempfile::tempdir().expect("creating test fixture failed");
531        // Create the config file path
532        let config_path =
533            config_dir.path().join("example-app").join("example-config").with_extension("toml");
534        // Run the test function with the config path
535        test_fn(&config_path);
536        config_dir.close().expect("removing test fixture failed");
537    }
538
539    #[test]
540    fn test_load_path_works() {
541        with_config_path(|path| {
542            let config = Config::from_path(path).expect("load_path failed");
543            assert_eq!(config, Config::default());
544        })
545    }
546
547    #[test]
548    fn test_load_path_reads_existing_config() {
549        with_config_path(|path| {
550            let config = Config::default();
551
552            // Create the parent directory if it doesn't exist
553            if let Some(parent) = path.parent() {
554                std::fs::create_dir_all(parent).expect("Failed to create directories");
555            }
556
557            // Write the config to the file
558            std::fs::write(path, toml::to_string(&config).unwrap())
559                .expect("Failed to write config");
560
561            // Load the config from the file and compare it
562            let loaded = Config::from_path(path).expect("load_path failed");
563            assert_eq!(config, loaded);
564        })
565    }
566
567    #[test]
568    fn test_load_path_fails_on_invalid_toml() {
569        with_config_path(|path| {
570            let invalid_toml = "invalid toml data";
571
572            // Create the parent directory if it doesn't exist
573            if let Some(parent) = path.parent() {
574                std::fs::create_dir_all(parent).expect("Failed to create directories");
575            }
576
577            // Write invalid TOML data to the file
578            std::fs::write(path, invalid_toml).expect("Failed to write invalid TOML");
579
580            // Attempt to load the config should fail
581            let result = Config::from_path(path);
582            assert!(result.is_err());
583        })
584    }
585
586    #[test]
587    fn test_load_path_creates_directory_if_not_exists() {
588        with_config_path(|path| {
589            // Ensure the directory does not exist
590            let parent = path.parent().unwrap();
591            assert!(!parent.exists());
592
593            // Load the configuration, which should create the directory and a default config file
594            let config = Config::from_path(path).expect("load_path failed");
595            assert_eq!(config, Config::default());
596
597            // The directory and file should now exist
598            assert!(parent.exists());
599            assert!(path.exists());
600        });
601    }
602
603    #[test]
604    fn test_store_config() {
605        with_tempdir("config-store-test", |config_path| {
606            let config = Config::default();
607            std::fs::write(
608                config_path,
609                toml::to_string(&config).expect("Failed to serialize config"),
610            )
611            .expect("Failed to write config file");
612        })
613    }
614
615    #[test]
616    fn test_store_config_method() {
617        with_tempdir("config-store-test-method", |config_path| {
618            let config = Config::default();
619            config.save(config_path).expect("Failed to store config");
620        })
621    }
622
623    #[test]
624    fn test_load_config() {
625        with_tempdir("config-load-test", |config_path| {
626            let config = Config::default();
627
628            // Write the config to a file
629            std::fs::write(
630                config_path,
631                toml::to_string(&config).expect("Failed to serialize config"),
632            )
633            .expect("Failed to write config file");
634
635            // Load the config from the file
636            let loaded_config = Config::from_path(config_path).unwrap();
637
638            // Compare the loaded config with the original config
639            assert_eq!(config, loaded_config);
640        })
641    }
642
643    #[test]
644    fn test_load_execution_stage() {
645        with_tempdir("config-load-test", |config_path| {
646            let mut config = Config::default();
647            config.stages.execution.max_duration = Some(Duration::from_secs(10 * 60));
648
649            // Write the config to a file
650            std::fs::write(
651                config_path,
652                toml::to_string(&config).expect("Failed to serialize config"),
653            )
654            .expect("Failed to write config file");
655
656            // Load the config from the file
657            let loaded_config = Config::from_path(config_path).unwrap();
658
659            // Compare the loaded config with the original config
660            assert_eq!(config, loaded_config);
661        })
662    }
663
664    // ensures config deserialization is backwards compatible
665    #[test]
666    fn test_backwards_compatibility() {
667        let alpha_0_0_8 = r"#
668[stages.headers]
669downloader_max_concurrent_requests = 100
670downloader_min_concurrent_requests = 5
671downloader_max_buffered_responses = 100
672downloader_request_limit = 1000
673commit_threshold = 10000
674
675[stages.bodies]
676downloader_request_limit = 200
677downloader_stream_batch_size = 1000
678downloader_max_buffered_blocks_size_bytes = 2147483648
679downloader_min_concurrent_requests = 5
680downloader_max_concurrent_requests = 100
681
682[stages.sender_recovery]
683commit_threshold = 5000000
684
685[stages.execution]
686max_blocks = 500000
687max_changes = 5000000
688
689[stages.account_hashing]
690clean_threshold = 500000
691commit_threshold = 100000
692
693[stages.storage_hashing]
694clean_threshold = 500000
695commit_threshold = 100000
696
697[stages.merkle]
698clean_threshold = 50000
699
700[stages.transaction_lookup]
701chunk_size = 5000000
702
703[stages.index_account_history]
704commit_threshold = 100000
705
706[stages.index_storage_history]
707commit_threshold = 100000
708
709[peers]
710refill_slots_interval = '1s'
711trusted_nodes = []
712connect_trusted_nodes_only = false
713max_backoff_count = 5
714ban_duration = '12h'
715
716[peers.connection_info]
717max_outbound = 100
718max_inbound = 30
719
720[peers.reputation_weights]
721bad_message = -16384
722bad_block = -16384
723bad_transactions = -16384
724already_seen_transactions = 0
725timeout = -4096
726bad_protocol = -2147483648
727failed_to_connect = -25600
728dropped = -4096
729
730[peers.backoff_durations]
731low = '30s'
732medium = '3m'
733high = '15m'
734max = '1h'
735
736[sessions]
737session_command_buffer = 32
738session_event_buffer = 260
739
740[sessions.limits]
741
742[sessions.initial_internal_request_timeout]
743secs = 20
744nanos = 0
745
746[sessions.protocol_breach_request_timeout]
747secs = 120
748nanos = 0
749
750[prune]
751block_interval = 5
752
753[prune.parts]
754sender_recovery = { distance = 16384 }
755transaction_lookup = 'full'
756receipts = { before = 1920000 }
757account_history = { distance = 16384 }
758storage_history = { distance = 16384 }
759[prune.parts.receipts_log_filter]
760'0xa0b86991c6218b36c1d19d4a2e9eb0ce3606eb48' = { before = 17000000 }
761'0xdac17f958d2ee523a2206206994597c13d831ec7' = { distance = 1000 }
762#";
763        let _conf: Config = toml::from_str(alpha_0_0_8).unwrap();
764
765        let alpha_0_0_11 = r"#
766[prune.segments]
767sender_recovery = { distance = 16384 }
768transaction_lookup = 'full'
769receipts = { before = 1920000 }
770account_history = { distance = 16384 }
771storage_history = { distance = 16384 }
772[prune.segments.receipts_log_filter]
773'0xa0b86991c6218b36c1d19d4a2e9eb0ce3606eb48' = { before = 17000000 }
774'0xdac17f958d2ee523a2206206994597c13d831ec7' = { distance = 1000 }
775#";
776        let _conf: Config = toml::from_str(alpha_0_0_11).unwrap();
777
778        let alpha_0_0_18 = r"#
779[stages.headers]
780downloader_max_concurrent_requests = 100
781downloader_min_concurrent_requests = 5
782downloader_max_buffered_responses = 100
783downloader_request_limit = 1000
784commit_threshold = 10000
785
786[stages.total_difficulty]
787commit_threshold = 100000
788
789[stages.bodies]
790downloader_request_limit = 200
791downloader_stream_batch_size = 1000
792downloader_max_buffered_blocks_size_bytes = 2147483648
793downloader_min_concurrent_requests = 5
794downloader_max_concurrent_requests = 100
795
796[stages.sender_recovery]
797commit_threshold = 5000000
798
799[stages.execution]
800max_blocks = 500000
801max_changes = 5000000
802max_cumulative_gas = 1500000000000
803[stages.execution.max_duration]
804secs = 600
805nanos = 0
806
807[stages.account_hashing]
808clean_threshold = 500000
809commit_threshold = 100000
810
811[stages.storage_hashing]
812clean_threshold = 500000
813commit_threshold = 100000
814
815[stages.merkle]
816clean_threshold = 50000
817
818[stages.transaction_lookup]
819commit_threshold = 5000000
820
821[stages.index_account_history]
822commit_threshold = 100000
823
824[stages.index_storage_history]
825commit_threshold = 100000
826
827[peers]
828refill_slots_interval = '5s'
829trusted_nodes = []
830connect_trusted_nodes_only = false
831max_backoff_count = 5
832ban_duration = '12h'
833
834[peers.connection_info]
835max_outbound = 100
836max_inbound = 30
837max_concurrent_outbound_dials = 10
838
839[peers.reputation_weights]
840bad_message = -16384
841bad_block = -16384
842bad_transactions = -16384
843already_seen_transactions = 0
844timeout = -4096
845bad_protocol = -2147483648
846failed_to_connect = -25600
847dropped = -4096
848bad_announcement = -1024
849
850[peers.backoff_durations]
851low = '30s'
852medium = '3m'
853high = '15m'
854max = '1h'
855
856[sessions]
857session_command_buffer = 32
858session_event_buffer = 260
859
860[sessions.limits]
861
862[sessions.initial_internal_request_timeout]
863secs = 20
864nanos = 0
865
866[sessions.protocol_breach_request_timeout]
867secs = 120
868nanos = 0
869#";
870        let conf: Config = toml::from_str(alpha_0_0_18).unwrap();
871        assert_eq!(conf.stages.execution.max_duration, Some(Duration::from_secs(10 * 60)));
872
873        let alpha_0_0_19 = r"#
874[stages.headers]
875downloader_max_concurrent_requests = 100
876downloader_min_concurrent_requests = 5
877downloader_max_buffered_responses = 100
878downloader_request_limit = 1000
879commit_threshold = 10000
880
881[stages.total_difficulty]
882commit_threshold = 100000
883
884[stages.bodies]
885downloader_request_limit = 200
886downloader_stream_batch_size = 1000
887downloader_max_buffered_blocks_size_bytes = 2147483648
888downloader_min_concurrent_requests = 5
889downloader_max_concurrent_requests = 100
890
891[stages.sender_recovery]
892commit_threshold = 5000000
893
894[stages.execution]
895max_blocks = 500000
896max_changes = 5000000
897max_cumulative_gas = 1500000000000
898max_duration = '10m'
899
900[stages.account_hashing]
901clean_threshold = 500000
902commit_threshold = 100000
903
904[stages.storage_hashing]
905clean_threshold = 500000
906commit_threshold = 100000
907
908[stages.merkle]
909clean_threshold = 50000
910
911[stages.transaction_lookup]
912commit_threshold = 5000000
913
914[stages.index_account_history]
915commit_threshold = 100000
916
917[stages.index_storage_history]
918commit_threshold = 100000
919
920[peers]
921refill_slots_interval = '5s'
922trusted_nodes = []
923connect_trusted_nodes_only = false
924max_backoff_count = 5
925ban_duration = '12h'
926
927[peers.connection_info]
928max_outbound = 100
929max_inbound = 30
930max_concurrent_outbound_dials = 10
931
932[peers.reputation_weights]
933bad_message = -16384
934bad_block = -16384
935bad_transactions = -16384
936already_seen_transactions = 0
937timeout = -4096
938bad_protocol = -2147483648
939failed_to_connect = -25600
940dropped = -4096
941bad_announcement = -1024
942
943[peers.backoff_durations]
944low = '30s'
945medium = '3m'
946high = '15m'
947max = '1h'
948
949[sessions]
950session_command_buffer = 32
951session_event_buffer = 260
952
953[sessions.limits]
954
955[sessions.initial_internal_request_timeout]
956secs = 20
957nanos = 0
958
959[sessions.protocol_breach_request_timeout]
960secs = 120
961nanos = 0
962#";
963        let _conf: Config = toml::from_str(alpha_0_0_19).unwrap();
964    }
965
966    // ensures prune config deserialization is backwards compatible
967    #[test]
968    fn test_backwards_compatibility_prune_full() {
969        let s = r"#
970[prune]
971block_interval = 5
972
973[prune.segments]
974sender_recovery = { distance = 16384 }
975transaction_lookup = 'full'
976receipts = { distance = 16384 }
977#";
978        let _conf: Config = toml::from_str(s).unwrap();
979
980        let s = r"#
981[prune]
982block_interval = 5
983
984[prune.segments]
985sender_recovery = { distance = 16384 }
986transaction_lookup = 'full'
987receipts = 'full'
988#";
989        let err = toml::from_str::<Config>(s).unwrap_err().to_string();
990        assert!(err.contains("invalid value: string \"full\""), "{}", err);
991    }
992
993    #[test]
994    fn test_prune_config_merge() {
995        let mut config1 = PruneConfig {
996            block_interval: 5,
997            segments: PruneModes {
998                sender_recovery: Some(PruneMode::Full),
999                transaction_lookup: None,
1000                receipts: Some(PruneMode::Distance(1000)),
1001                account_history: None,
1002                storage_history: Some(PruneMode::Before(5000)),
1003                bodies_history: None,
1004                receipts_log_filter: ReceiptsLogPruneConfig(BTreeMap::from([(
1005                    Address::random(),
1006                    PruneMode::Full,
1007                )])),
1008            },
1009        };
1010
1011        let config2 = PruneConfig {
1012            block_interval: 10,
1013            segments: PruneModes {
1014                sender_recovery: Some(PruneMode::Distance(500)),
1015                transaction_lookup: Some(PruneMode::Full),
1016                receipts: Some(PruneMode::Full),
1017                account_history: Some(PruneMode::Distance(2000)),
1018                storage_history: Some(PruneMode::Distance(3000)),
1019                bodies_history: None,
1020                receipts_log_filter: ReceiptsLogPruneConfig(BTreeMap::from([
1021                    (Address::random(), PruneMode::Distance(1000)),
1022                    (Address::random(), PruneMode::Before(2000)),
1023                ])),
1024            },
1025        };
1026
1027        let original_filter = config1.segments.receipts_log_filter.clone();
1028        config1.merge(Some(config2));
1029
1030        // Check that the configuration has been merged. Any configuration present in config1
1031        // should not be overwritten by config2
1032        assert_eq!(config1.block_interval, 10);
1033        assert_eq!(config1.segments.sender_recovery, Some(PruneMode::Full));
1034        assert_eq!(config1.segments.transaction_lookup, Some(PruneMode::Full));
1035        assert_eq!(config1.segments.receipts, Some(PruneMode::Distance(1000)));
1036        assert_eq!(config1.segments.account_history, Some(PruneMode::Distance(2000)));
1037        assert_eq!(config1.segments.storage_history, Some(PruneMode::Before(5000)));
1038        assert_eq!(config1.segments.receipts_log_filter, original_filter);
1039    }
1040
1041    #[test]
1042    fn test_conf_trust_nodes_only() {
1043        let trusted_nodes_only = r"#
1044[peers]
1045trusted_nodes_only = true
1046#";
1047        let conf: Config = toml::from_str(trusted_nodes_only).unwrap();
1048        assert!(conf.peers.trusted_nodes_only);
1049
1050        let trusted_nodes_only = r"#
1051[peers]
1052connect_trusted_nodes_only = true
1053#";
1054        let conf: Config = toml::from_str(trusted_nodes_only).unwrap();
1055        assert!(conf.peers.trusted_nodes_only);
1056    }
1057
1058    #[test]
1059    fn test_can_support_dns_in_trusted_nodes() {
1060        let reth_toml = r#"
1061    [peers]
1062    trusted_nodes = [
1063        "enode://0401e494dbd0c84c5c0f72adac5985d2f2525e08b68d448958aae218f5ac8198a80d1498e0ebec2ce38b1b18d6750f6e61a56b4614c5a6c6cf0981c39aed47dc@34.159.32.127:30303",
1064        "enode://e9675164b5e17b9d9edf0cc2bd79e6b6f487200c74d1331c220abb5b8ee80c2eefbf18213989585e9d0960683e819542e11d4eefb5f2b4019e1e49f9fd8fff18@berav2-bootnode.staketab.org:30303"
1065    ]
1066    "#;
1067
1068        let conf: Config = toml::from_str(reth_toml).unwrap();
1069        assert_eq!(conf.peers.trusted_nodes.len(), 2);
1070
1071        let expected_enodes = vec![
1072            "enode://0401e494dbd0c84c5c0f72adac5985d2f2525e08b68d448958aae218f5ac8198a80d1498e0ebec2ce38b1b18d6750f6e61a56b4614c5a6c6cf0981c39aed47dc@34.159.32.127:30303",
1073            "enode://e9675164b5e17b9d9edf0cc2bd79e6b6f487200c74d1331c220abb5b8ee80c2eefbf18213989585e9d0960683e819542e11d4eefb5f2b4019e1e49f9fd8fff18@berav2-bootnode.staketab.org:30303",
1074        ];
1075
1076        for enode in expected_enodes {
1077            let node = TrustedPeer::from_str(enode).unwrap();
1078            assert!(conf.peers.trusted_nodes.contains(&node));
1079        }
1080    }
1081}