reth_config/
config.rs

1//! Configuration files.
2use reth_network_types::{PeersConfig, SessionsConfig};
3use reth_prune_types::PruneModes;
4use reth_stages_types::ExecutionStageThresholds;
5use std::{
6    path::{Path, PathBuf},
7    time::Duration,
8};
9use url::Url;
10
// File extension required by `Config::save`; `from_path` does not enforce it.
#[cfg(feature = "serde")]
const EXTENSION: &str = "toml";

/// The default prune block interval, in blocks (used by [`PruneConfig`]).
pub const DEFAULT_BLOCK_INTERVAL: usize = 5;
16
/// Configuration for the reth node.
///
/// With the `serde` feature, `#[serde(default)]` makes every section optional in the
/// serialized (TOML) form: missing sections fall back to their [`Default`] values.
#[derive(Debug, Clone, Default, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct Config {
    /// Configuration for each stage in the pipeline.
    // TODO(onbjerg): Can we make this easier to maintain when we add/remove stages?
    pub stages: StageConfig,
    /// Configuration for pruning.
    #[cfg_attr(feature = "serde", serde(default))]
    pub prune: PruneConfig,
    /// Configuration for the discovery service.
    pub peers: PeersConfig,
    /// Configuration for peer sessions.
    pub sessions: SessionsConfig,
}
33
impl Config {
    /// Sets the pruning configuration, replacing any previously configured value.
    pub const fn set_prune_config(&mut self, prune_config: PruneConfig) {
        self.prune = prune_config;
    }
}
40
#[cfg(feature = "serde")]
impl Config {
    /// Load a [`Config`] from a specified path.
    ///
    /// A new configuration file is created with default values if none
    /// exists.
    pub fn from_path(path: impl AsRef<Path>) -> eyre::Result<Self> {
        let path = path.as_ref();
        match std::fs::read_to_string(path) {
            // File exists: parse its contents as TOML.
            Ok(raw) => {
                toml::from_str(&raw).map_err(|e| eyre::eyre!("Failed to parse TOML: {e}"))
            }
            // File is missing: write the default configuration to disk and return it.
            Err(e) if e.kind() == std::io::ErrorKind::NotFound => {
                if let Some(parent) = path.parent() {
                    std::fs::create_dir_all(parent)
                        .map_err(|e| eyre::eyre!("Failed to create directory: {e}"))?;
                }
                let default_cfg = Self::default();
                let serialized = toml::to_string_pretty(&default_cfg)
                    .map_err(|e| eyre::eyre!("Failed to serialize to TOML: {e}"))?;
                std::fs::write(path, serialized)
                    .map_err(|e| eyre::eyre!("Failed to write configuration file: {e}"))?;
                Ok(default_cfg)
            }
            // Any other I/O error is surfaced as-is.
            Err(e) => Err(eyre::eyre!("Failed to load configuration: {e}")),
        }
    }

    /// Returns the [`PeersConfig`] for the node.
    ///
    /// If a peers file is provided, the basic nodes from the file are added to the configuration.
    pub fn peers_config_with_basic_nodes_from_file(
        &self,
        peers_file: Option<&Path>,
    ) -> PeersConfig {
        let base = self.peers.clone();
        match base.with_basic_nodes_from_file(peers_file) {
            Ok(with_nodes) => with_nodes,
            // Reading the peers file failed: fall back to the unmodified peers config.
            Err(_) => self.peers.clone(),
        }
    }

    /// Save the configuration to toml file.
    pub fn save(&self, path: &Path) -> Result<(), std::io::Error> {
        // Refuse to write to anything that is not a `.toml` file.
        let has_toml_extension = path.extension() == Some(std::ffi::OsStr::new(EXTENSION));
        if !has_toml_extension {
            return Err(std::io::Error::new(
                std::io::ErrorKind::InvalidInput,
                format!("reth config file extension must be '{EXTENSION}'"),
            ));
        }

        let serialized = toml::to_string(self)
            .map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidData, e.to_string()))?;
        std::fs::write(path, serialized)
    }
}
98
/// Configuration for each stage in the pipeline.
///
/// Each field maps to a `[stages.<name>]` section in the TOML configuration file.
#[derive(Debug, Clone, Default, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct StageConfig {
    /// ERA stage configuration.
    pub era: EraConfig,
    /// Header stage configuration.
    pub headers: HeadersConfig,
    /// Body stage configuration.
    pub bodies: BodiesConfig,
    /// Sender Recovery stage configuration.
    pub sender_recovery: SenderRecoveryConfig,
    /// Execution stage configuration.
    pub execution: ExecutionConfig,
    /// Prune stage configuration.
    pub prune: PruneStageConfig,
    /// Account Hashing stage configuration.
    pub account_hashing: HashingConfig,
    /// Storage Hashing stage configuration.
    pub storage_hashing: HashingConfig,
    /// Merkle stage configuration.
    pub merkle: MerkleConfig,
    /// Transaction Lookup stage configuration.
    pub transaction_lookup: TransactionLookupConfig,
    /// Index Account History stage configuration.
    pub index_account_history: IndexHistoryConfig,
    /// Index Storage History stage configuration.
    pub index_storage_history: IndexHistoryConfig,
    /// Common ETL related configuration.
    pub etl: EtlConfig,
}
131
132impl StageConfig {
133    /// The highest threshold (in number of blocks) for switching between incremental and full
134    /// calculations across `MerkleStage`, `AccountHashingStage` and `StorageHashingStage`. This is
135    /// required to figure out if can prune or not changesets on subsequent pipeline runs during
136    /// `ExecutionStage`
137    pub fn execution_external_clean_threshold(&self) -> u64 {
138        self.merkle
139            .incremental_threshold
140            .max(self.account_hashing.clean_threshold)
141            .max(self.storage_hashing.clean_threshold)
142    }
143}
144
/// ERA stage configuration.
///
/// ERA1 files are sourced either from a local directory (`path`) or downloaded
/// from a remote host (`url` + `folder`); the two sources are mutually exclusive.
#[derive(Debug, Clone, Default, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct EraConfig {
    /// Path to a local directory where ERA1 files are located.
    ///
    /// Conflicts with `url`.
    pub path: Option<PathBuf>,
    /// The base URL of an ERA1 file host to download from.
    ///
    /// Conflicts with `path`.
    pub url: Option<Url>,
    /// Path to a directory where files downloaded from `url` will be stored until processed.
    ///
    /// Required for `url`.
    pub folder: Option<PathBuf>,
}
163
164impl EraConfig {
165    /// Sets `folder` for temporary downloads as a directory called "era" inside `dir`.
166    pub fn with_datadir(mut self, dir: impl AsRef<Path>) -> Self {
167        self.folder = Some(dir.as_ref().join("era"));
168        self
169    }
170}
171
/// Header stage configuration.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct HeadersConfig {
    /// The maximum number of requests to send concurrently.
    ///
    /// Default: 100
    pub downloader_max_concurrent_requests: usize,
    /// The minimum number of requests to send concurrently.
    ///
    /// Default: 5
    pub downloader_min_concurrent_requests: usize,
    /// Maximum amount of responses to buffer internally.
    /// The response contains multiple headers.
    ///
    /// Default: 100
    pub downloader_max_buffered_responses: usize,
    /// The maximum number of headers to request from a peer at a time.
    ///
    /// Default: 1000
    pub downloader_request_limit: u64,
    /// The maximum number of headers to download before committing progress to the database.
    ///
    /// Default: 10000
    pub commit_threshold: u64,
}
193
194impl Default for HeadersConfig {
195    fn default() -> Self {
196        Self {
197            commit_threshold: 10_000,
198            downloader_request_limit: 1_000,
199            downloader_max_concurrent_requests: 100,
200            downloader_min_concurrent_requests: 5,
201            downloader_max_buffered_responses: 100,
202        }
203    }
204}
205
/// Body stage configuration.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct BodiesConfig {
    /// The batch size of non-empty blocks per one request
    ///
    /// Default: 200
    pub downloader_request_limit: u64,
    /// The maximum number of block bodies returned at once from the stream
    ///
    /// Default: `1_000`
    pub downloader_stream_batch_size: usize,
    /// The size of the internal block buffer in bytes.
    ///
    /// Default: 2GB (`2_147_483_648` bytes)
    pub downloader_max_buffered_blocks_size_bytes: usize,
    /// The minimum number of requests to send concurrently.
    ///
    /// Default: 5
    pub downloader_min_concurrent_requests: usize,
    /// The maximum number of requests to send concurrently.
    /// This is equal to the max number of peers.
    ///
    /// Default: 100
    pub downloader_max_concurrent_requests: usize,
}
233
234impl Default for BodiesConfig {
235    fn default() -> Self {
236        Self {
237            downloader_request_limit: 200,
238            downloader_stream_batch_size: 1_000,
239            downloader_max_buffered_blocks_size_bytes: 2 * 1024 * 1024 * 1024, // ~2GB
240            downloader_min_concurrent_requests: 5,
241            downloader_max_concurrent_requests: 100,
242        }
243    }
244}
245
/// Sender recovery stage configuration.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct SenderRecoveryConfig {
    /// The maximum number of transactions to process before committing progress to the database.
    ///
    /// Default: `5_000_000`
    pub commit_threshold: u64,
}
254
255impl Default for SenderRecoveryConfig {
256    fn default() -> Self {
257        Self { commit_threshold: 5_000_000 }
258    }
259}
260
/// Execution stage configuration.
///
/// Every limit is optional; see [`ExecutionStageThresholds`] for how they are consumed.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct ExecutionConfig {
    /// The maximum number of blocks to process before the execution stage commits.
    ///
    /// Default: `500_000`
    pub max_blocks: Option<u64>,
    /// The maximum number of state changes to keep in memory before the execution stage commits.
    ///
    /// Default: `5_000_000`
    pub max_changes: Option<u64>,
    /// The maximum cumulative amount of gas to process before the execution stage commits.
    ///
    /// Default: `1_500_000_000_000` (50k full blocks of 30M gas)
    pub max_cumulative_gas: Option<u64>,
    /// The maximum time spent on blocks processing before the execution stage commits.
    ///
    /// Serialized in humantime format (e.g. `'10m'`); deserialization also accepts the
    /// older `{ secs, nanos }` form via [`deserialize_duration`]. Default: 10 minutes.
    #[cfg_attr(
        feature = "serde",
        serde(
            serialize_with = "humantime_serde::serialize",
            deserialize_with = "deserialize_duration"
        )
    )]
    pub max_duration: Option<Duration>,
}
282
283impl Default for ExecutionConfig {
284    fn default() -> Self {
285        Self {
286            max_blocks: Some(500_000),
287            max_changes: Some(5_000_000),
288            // 50k full blocks of 30M gas
289            max_cumulative_gas: Some(30_000_000 * 50_000),
290            // 10 minutes
291            max_duration: Some(Duration::from_secs(10 * 60)),
292        }
293    }
294}
295
296impl From<ExecutionConfig> for ExecutionStageThresholds {
297    fn from(config: ExecutionConfig) -> Self {
298        Self {
299            max_blocks: config.max_blocks,
300            max_changes: config.max_changes,
301            max_cumulative_gas: config.max_cumulative_gas,
302            max_duration: config.max_duration,
303        }
304    }
305}
306
/// Prune stage configuration.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct PruneStageConfig {
    /// The maximum number of entries to prune before committing progress to the database.
    ///
    /// Default: `1_000_000`
    pub commit_threshold: usize,
}
315
316impl Default for PruneStageConfig {
317    fn default() -> Self {
318        Self { commit_threshold: 1_000_000 }
319    }
320}
321
/// Hashing stage configuration.
///
/// Shared by the Account Hashing and Storage Hashing stages.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct HashingConfig {
    /// The threshold (in number of blocks) for switching between
    /// incremental hashing and full hashing.
    ///
    /// Default: `500_000`
    pub clean_threshold: u64,
    /// The maximum number of entities to process before committing progress to the database.
    ///
    /// Default: `100_000`
    pub commit_threshold: u64,
}
333
334impl Default for HashingConfig {
335    fn default() -> Self {
336        Self { clean_threshold: 500_000, commit_threshold: 100_000 }
337    }
338}
339
/// Merkle stage configuration.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct MerkleConfig {
    /// The number of blocks we will run the incremental root method for when we are catching up on
    /// the merkle stage for a large number of blocks.
    ///
    /// When we are catching up for a large number of blocks, we can only run the incremental root
    /// for a limited number of blocks, otherwise the incremental root method may cause the node to
    /// OOM. This number determines how many blocks in a row we will run the incremental root
    /// method for.
    ///
    /// Default: `7_000`
    pub incremental_threshold: u64,
    /// The threshold (in number of blocks) for switching from incremental trie building of changes
    /// to whole rebuild.
    ///
    /// Default: `100_000`
    pub rebuild_threshold: u64,
}
357
358impl Default for MerkleConfig {
359    fn default() -> Self {
360        Self { incremental_threshold: 7_000, rebuild_threshold: 100_000 }
361    }
362}
363
/// Transaction Lookup stage configuration.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct TransactionLookupConfig {
    /// The maximum number of transactions to process before writing to disk.
    ///
    /// Default: `5_000_000`
    pub chunk_size: u64,
}
372
373impl Default for TransactionLookupConfig {
374    fn default() -> Self {
375        Self { chunk_size: 5_000_000 }
376    }
377}
378
/// Common ETL related configuration.
#[derive(Debug, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct EtlConfig {
    /// Data directory where temporary files are created.
    ///
    /// `None` until a directory is assigned (see [`EtlConfig::from_datadir`]).
    pub dir: Option<PathBuf>,
    /// The maximum size in bytes of data held in memory before being flushed to disk as a file.
    ///
    /// Default: 500 MB (see [`EtlConfig::default_file_size`]).
    pub file_size: usize,
}
389
390impl Default for EtlConfig {
391    fn default() -> Self {
392        Self { dir: None, file_size: Self::default_file_size() }
393    }
394}
395
396impl EtlConfig {
397    /// Creates an ETL configuration
398    pub const fn new(dir: Option<PathBuf>, file_size: usize) -> Self {
399        Self { dir, file_size }
400    }
401
402    /// Return default ETL directory from datadir path.
403    pub fn from_datadir(path: &Path) -> PathBuf {
404        path.join("etl-tmp")
405    }
406
407    /// Default size in bytes of data held in memory before being flushed to disk as a file.
408    pub const fn default_file_size() -> usize {
409        // 500 MB
410        500 * (1024 * 1024)
411    }
412}
413
/// History stage configuration.
///
/// Shared by the Index Account History and Index Storage History stages.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct IndexHistoryConfig {
    /// The maximum number of blocks to process before committing progress to the database.
    ///
    /// Default: `100_000`
    pub commit_threshold: u64,
}
422
423impl Default for IndexHistoryConfig {
424    fn default() -> Self {
425        Self { commit_threshold: 100_000 }
426    }
427}
428
/// Pruning configuration.
#[derive(Debug, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
#[cfg_attr(feature = "serde", serde(default))]
pub struct PruneConfig {
    /// Minimum pruning interval measured in blocks.
    ///
    /// Default: [`DEFAULT_BLOCK_INTERVAL`].
    pub block_interval: usize,
    /// Pruning configuration for every part of the data that can be pruned.
    // `parts` is the legacy (pre-`segments`) name of this section; keep accepting it.
    #[cfg_attr(feature = "serde", serde(alias = "parts"))]
    pub segments: PruneModes,
}
440
441impl Default for PruneConfig {
442    fn default() -> Self {
443        Self { block_interval: DEFAULT_BLOCK_INTERVAL, segments: PruneModes::default() }
444    }
445}
446
impl PruneConfig {
    /// Returns whether this configuration is the default one.
    pub fn is_default(&self) -> bool {
        self == &Self::default()
    }

    /// Returns whether there is any kind of receipt pruning configuration.
    pub const fn has_receipts_pruning(&self) -> bool {
        self.segments.receipts.is_some()
    }

    /// Merges another `PruneConfig` into this one, taking values from the other config if and only
    /// if the corresponding value in this config is not set.
    pub fn merge(&mut self, other: Self) {
        // Exhaustively destructure `other` so that adding a field to `PruneModes` without
        // updating this merge logic becomes a compile error. `receipts_log_filter` is a
        // deprecated unit field, hence the `#[expect(deprecated)]`.
        #[expect(deprecated)]
        let Self {
            block_interval,
            segments:
                PruneModes {
                    sender_recovery,
                    transaction_lookup,
                    receipts,
                    account_history,
                    storage_history,
                    bodies_history,
                    merkle_changesets,
                    receipts_log_filter: (),
                },
        } = other;

        // Merge block_interval, only update if it's the default interval
        if self.block_interval == DEFAULT_BLOCK_INTERVAL {
            self.block_interval = block_interval;
        }

        // Merge the various segment prune modes
        // `Option::or` keeps the already-set value in `self` and only fills in gaps.
        self.segments.sender_recovery = self.segments.sender_recovery.or(sender_recovery);
        self.segments.transaction_lookup = self.segments.transaction_lookup.or(transaction_lookup);
        self.segments.receipts = self.segments.receipts.or(receipts);
        self.segments.account_history = self.segments.account_history.or(account_history);
        self.segments.storage_history = self.segments.storage_history.or(storage_history);
        self.segments.bodies_history = self.segments.bodies_history.or(bodies_history);
        // Merkle changesets is not optional, so we just replace it if provided
        self.segments.merkle_changesets = merkle_changesets;
    }
}
493
/// Helper type to support older versions of Duration deserialization.
///
/// Accepts both the humantime string format (e.g. `'10m'`) and the older
/// `{ secs, nanos }` struct form, trying the humantime representation first.
#[cfg(feature = "serde")]
fn deserialize_duration<'de, D>(deserializer: D) -> Result<Option<Duration>, D::Error>
where
    D: serde::de::Deserializer<'de>,
{
    // Untagged: serde tries each variant in declaration order until one succeeds.
    #[derive(serde::Deserialize)]
    #[serde(untagged)]
    enum AnyDuration {
        // Human-readable durations such as '10m' or '30s'.
        #[serde(deserialize_with = "humantime_serde::deserialize")]
        Human(Option<Duration>),
        // Plain `Duration` representation (the `{ secs, nanos }` form).
        Duration(Option<Duration>),
    }

    // Collapse both variants into the inner `Option<Duration>`.
    <AnyDuration as serde::Deserialize>::deserialize(deserializer).map(|d| match d {
        AnyDuration::Human(duration) | AnyDuration::Duration(duration) => duration,
    })
}
512
513#[cfg(all(test, feature = "serde"))]
514mod tests {
515    use super::{Config, EXTENSION};
516    use crate::PruneConfig;
517    use reth_network_peers::TrustedPeer;
518    use reth_prune_types::{PruneMode, PruneModes};
519    use std::{path::Path, str::FromStr, time::Duration};
520
521    fn with_tempdir(filename: &str, proc: fn(&std::path::Path)) {
522        let temp_dir = tempfile::tempdir().unwrap();
523        let config_path = temp_dir.path().join(filename).with_extension(EXTENSION);
524
525        proc(&config_path);
526
527        temp_dir.close().unwrap()
528    }
529
530    /// Run a test function with a temporary config path as fixture.
531    fn with_config_path(test_fn: fn(&Path)) {
532        // Create a temporary directory for the config file
533        let config_dir = tempfile::tempdir().expect("creating test fixture failed");
534        // Create the config file path
535        let config_path =
536            config_dir.path().join("example-app").join("example-config").with_extension("toml");
537        // Run the test function with the config path
538        test_fn(&config_path);
539        config_dir.close().expect("removing test fixture failed");
540    }
541
542    #[test]
543    fn test_load_path_works() {
544        with_config_path(|path| {
545            let config = Config::from_path(path).expect("load_path failed");
546            assert_eq!(config, Config::default());
547        })
548    }
549
550    #[test]
551    fn test_load_path_reads_existing_config() {
552        with_config_path(|path| {
553            let config = Config::default();
554
555            // Create the parent directory if it doesn't exist
556            if let Some(parent) = path.parent() {
557                std::fs::create_dir_all(parent).expect("Failed to create directories");
558            }
559
560            // Write the config to the file
561            std::fs::write(path, toml::to_string(&config).unwrap())
562                .expect("Failed to write config");
563
564            // Load the config from the file and compare it
565            let loaded = Config::from_path(path).expect("load_path failed");
566            assert_eq!(config, loaded);
567        })
568    }
569
570    #[test]
571    fn test_load_path_fails_on_invalid_toml() {
572        with_config_path(|path| {
573            let invalid_toml = "invalid toml data";
574
575            // Create the parent directory if it doesn't exist
576            if let Some(parent) = path.parent() {
577                std::fs::create_dir_all(parent).expect("Failed to create directories");
578            }
579
580            // Write invalid TOML data to the file
581            std::fs::write(path, invalid_toml).expect("Failed to write invalid TOML");
582
583            // Attempt to load the config should fail
584            let result = Config::from_path(path);
585            assert!(result.is_err());
586        })
587    }
588
589    #[test]
590    fn test_load_path_creates_directory_if_not_exists() {
591        with_config_path(|path| {
592            // Ensure the directory does not exist
593            let parent = path.parent().unwrap();
594            assert!(!parent.exists());
595
596            // Load the configuration, which should create the directory and a default config file
597            let config = Config::from_path(path).expect("load_path failed");
598            assert_eq!(config, Config::default());
599
600            // The directory and file should now exist
601            assert!(parent.exists());
602            assert!(path.exists());
603        });
604    }
605
606    #[test]
607    fn test_store_config() {
608        with_tempdir("config-store-test", |config_path| {
609            let config = Config::default();
610            std::fs::write(
611                config_path,
612                toml::to_string(&config).expect("Failed to serialize config"),
613            )
614            .expect("Failed to write config file");
615        })
616    }
617
618    #[test]
619    fn test_store_config_method() {
620        with_tempdir("config-store-test-method", |config_path| {
621            let config = Config::default();
622            config.save(config_path).expect("Failed to store config");
623        })
624    }
625
626    #[test]
627    fn test_load_config() {
628        with_tempdir("config-load-test", |config_path| {
629            let config = Config::default();
630
631            // Write the config to a file
632            std::fs::write(
633                config_path,
634                toml::to_string(&config).expect("Failed to serialize config"),
635            )
636            .expect("Failed to write config file");
637
638            // Load the config from the file
639            let loaded_config = Config::from_path(config_path).unwrap();
640
641            // Compare the loaded config with the original config
642            assert_eq!(config, loaded_config);
643        })
644    }
645
646    #[test]
647    fn test_load_execution_stage() {
648        with_tempdir("config-load-test", |config_path| {
649            let mut config = Config::default();
650            config.stages.execution.max_duration = Some(Duration::from_secs(10 * 60));
651
652            // Write the config to a file
653            std::fs::write(
654                config_path,
655                toml::to_string(&config).expect("Failed to serialize config"),
656            )
657            .expect("Failed to write config file");
658
659            // Load the config from the file
660            let loaded_config = Config::from_path(config_path).unwrap();
661
662            // Compare the loaded config with the original config
663            assert_eq!(config, loaded_config);
664        })
665    }
666
    // ensures config deserialization is backwards compatible
    #[test]
    fn test_backwards_compatibility() {
        // alpha.8 era config: prune sections were still named `[prune.parts]` and
        // `receipts_log_filter` carried data.
        let alpha_0_0_8 = r"#
[stages.headers]
downloader_max_concurrent_requests = 100
downloader_min_concurrent_requests = 5
downloader_max_buffered_responses = 100
downloader_request_limit = 1000
commit_threshold = 10000

[stages.bodies]
downloader_request_limit = 200
downloader_stream_batch_size = 1000
downloader_max_buffered_blocks_size_bytes = 2147483648
downloader_min_concurrent_requests = 5
downloader_max_concurrent_requests = 100

[stages.sender_recovery]
commit_threshold = 5000000

[stages.execution]
max_blocks = 500000
max_changes = 5000000

[stages.account_hashing]
clean_threshold = 500000
commit_threshold = 100000

[stages.storage_hashing]
clean_threshold = 500000
commit_threshold = 100000

[stages.merkle]
clean_threshold = 50000

[stages.transaction_lookup]
chunk_size = 5000000

[stages.index_account_history]
commit_threshold = 100000

[stages.index_storage_history]
commit_threshold = 100000

[peers]
refill_slots_interval = '1s'
trusted_nodes = []
connect_trusted_nodes_only = false
max_backoff_count = 5
ban_duration = '12h'

[peers.connection_info]
max_outbound = 100
max_inbound = 30

[peers.reputation_weights]
bad_message = -16384
bad_block = -16384
bad_transactions = -16384
already_seen_transactions = 0
timeout = -4096
bad_protocol = -2147483648
failed_to_connect = -25600
dropped = -4096

[peers.backoff_durations]
low = '30s'
medium = '3m'
high = '15m'
max = '1h'

[sessions]
session_command_buffer = 32
session_event_buffer = 260

[sessions.limits]

[sessions.initial_internal_request_timeout]
secs = 20
nanos = 0

[sessions.protocol_breach_request_timeout]
secs = 120
nanos = 0

[prune]
block_interval = 5

[prune.parts]
sender_recovery = { distance = 16384 }
transaction_lookup = 'full'
receipts = { before = 1920000 }
account_history = { distance = 16384 }
storage_history = { distance = 16384 }
[prune.parts.receipts_log_filter]
'0xa0b86991c6218b36c1d19d4a2e9eb0ce3606eb48' = { before = 17000000 }
'0xdac17f958d2ee523a2206206994597c13d831ec7' = { distance = 1000 }
#";
        let _conf: Config = toml::from_str(alpha_0_0_8).unwrap();

        // alpha.11 renamed `[prune.parts]` to `[prune.segments]`; both must still parse.
        let alpha_0_0_11 = r"#
[prune.segments]
sender_recovery = { distance = 16384 }
transaction_lookup = 'full'
receipts = { before = 1920000 }
account_history = { distance = 16384 }
storage_history = { distance = 16384 }
[prune.segments.receipts_log_filter]
'0xa0b86991c6218b36c1d19d4a2e9eb0ce3606eb48' = { before = 17000000 }
'0xdac17f958d2ee523a2206206994597c13d831ec7' = { distance = 1000 }
#";
        let _conf: Config = toml::from_str(alpha_0_0_11).unwrap();

        // alpha.18 serialized `max_duration` as a `{ secs, nanos }` table; this form is
        // still accepted via `deserialize_duration`. The now-removed
        // `[stages.total_difficulty]` section must be ignored.
        let alpha_0_0_18 = r"#
[stages.headers]
downloader_max_concurrent_requests = 100
downloader_min_concurrent_requests = 5
downloader_max_buffered_responses = 100
downloader_request_limit = 1000
commit_threshold = 10000

[stages.total_difficulty]
commit_threshold = 100000

[stages.bodies]
downloader_request_limit = 200
downloader_stream_batch_size = 1000
downloader_max_buffered_blocks_size_bytes = 2147483648
downloader_min_concurrent_requests = 5
downloader_max_concurrent_requests = 100

[stages.sender_recovery]
commit_threshold = 5000000

[stages.execution]
max_blocks = 500000
max_changes = 5000000
max_cumulative_gas = 1500000000000
[stages.execution.max_duration]
secs = 600
nanos = 0

[stages.account_hashing]
clean_threshold = 500000
commit_threshold = 100000

[stages.storage_hashing]
clean_threshold = 500000
commit_threshold = 100000

[stages.merkle]
clean_threshold = 50000

[stages.transaction_lookup]
commit_threshold = 5000000

[stages.index_account_history]
commit_threshold = 100000

[stages.index_storage_history]
commit_threshold = 100000

[peers]
refill_slots_interval = '5s'
trusted_nodes = []
connect_trusted_nodes_only = false
max_backoff_count = 5
ban_duration = '12h'

[peers.connection_info]
max_outbound = 100
max_inbound = 30
max_concurrent_outbound_dials = 10

[peers.reputation_weights]
bad_message = -16384
bad_block = -16384
bad_transactions = -16384
already_seen_transactions = 0
timeout = -4096
bad_protocol = -2147483648
failed_to_connect = -25600
dropped = -4096
bad_announcement = -1024

[peers.backoff_durations]
low = '30s'
medium = '3m'
high = '15m'
max = '1h'

[sessions]
session_command_buffer = 32
session_event_buffer = 260

[sessions.limits]

[sessions.initial_internal_request_timeout]
secs = 20
nanos = 0

[sessions.protocol_breach_request_timeout]
secs = 120
nanos = 0
#";
        let conf: Config = toml::from_str(alpha_0_0_18).unwrap();
        // The struct-form duration must deserialize to the same value as '10m'.
        assert_eq!(conf.stages.execution.max_duration, Some(Duration::from_secs(10 * 60)));

        // alpha.19 switched `max_duration` to the humantime string format ('10m').
        let alpha_0_0_19 = r"#
[stages.headers]
downloader_max_concurrent_requests = 100
downloader_min_concurrent_requests = 5
downloader_max_buffered_responses = 100
downloader_request_limit = 1000
commit_threshold = 10000

[stages.total_difficulty]
commit_threshold = 100000

[stages.bodies]
downloader_request_limit = 200
downloader_stream_batch_size = 1000
downloader_max_buffered_blocks_size_bytes = 2147483648
downloader_min_concurrent_requests = 5
downloader_max_concurrent_requests = 100

[stages.sender_recovery]
commit_threshold = 5000000

[stages.execution]
max_blocks = 500000
max_changes = 5000000
max_cumulative_gas = 1500000000000
max_duration = '10m'

[stages.account_hashing]
clean_threshold = 500000
commit_threshold = 100000

[stages.storage_hashing]
clean_threshold = 500000
commit_threshold = 100000

[stages.merkle]
clean_threshold = 50000

[stages.transaction_lookup]
commit_threshold = 5000000

[stages.index_account_history]
commit_threshold = 100000

[stages.index_storage_history]
commit_threshold = 100000

[peers]
refill_slots_interval = '5s'
trusted_nodes = []
connect_trusted_nodes_only = false
max_backoff_count = 5
ban_duration = '12h'

[peers.connection_info]
max_outbound = 100
max_inbound = 30
max_concurrent_outbound_dials = 10

[peers.reputation_weights]
bad_message = -16384
bad_block = -16384
bad_transactions = -16384
already_seen_transactions = 0
timeout = -4096
bad_protocol = -2147483648
failed_to_connect = -25600
dropped = -4096
bad_announcement = -1024

[peers.backoff_durations]
low = '30s'
medium = '3m'
high = '15m'
max = '1h'

[sessions]
session_command_buffer = 32
session_event_buffer = 260

[sessions.limits]

[sessions.initial_internal_request_timeout]
secs = 20
nanos = 0

[sessions.protocol_breach_request_timeout]
secs = 120
nanos = 0
#";
        let _conf: Config = toml::from_str(alpha_0_0_19).unwrap();
    }
968
969    // ensures prune config deserialization is backwards compatible
970    #[test]
971    fn test_backwards_compatibility_prune_full() {
972        let s = r"#
973[prune]
974block_interval = 5
975
976[prune.segments]
977sender_recovery = { distance = 16384 }
978transaction_lookup = 'full'
979receipts = { distance = 16384 }
980#";
981        let _conf: Config = toml::from_str(s).unwrap();
982
983        let s = r"#
984[prune]
985block_interval = 5
986
987[prune.segments]
988sender_recovery = { distance = 16384 }
989transaction_lookup = 'full'
990receipts = 'full'
991#";
992        let err = toml::from_str::<Config>(s).unwrap_err().to_string();
993        assert!(err.contains("invalid value: string \"full\""), "{}", err);
994    }
995
996    #[test]
997    fn test_prune_config_merge() {
998        let mut config1 = PruneConfig {
999            block_interval: 5,
1000            segments: PruneModes {
1001                sender_recovery: Some(PruneMode::Full),
1002                transaction_lookup: None,
1003                receipts: Some(PruneMode::Distance(1000)),
1004                account_history: None,
1005                storage_history: Some(PruneMode::Before(5000)),
1006                bodies_history: None,
1007                merkle_changesets: PruneMode::Before(0),
1008                #[expect(deprecated)]
1009                receipts_log_filter: (),
1010            },
1011        };
1012
1013        let config2 = PruneConfig {
1014            block_interval: 10,
1015            segments: PruneModes {
1016                sender_recovery: Some(PruneMode::Distance(500)),
1017                transaction_lookup: Some(PruneMode::Full),
1018                receipts: Some(PruneMode::Full),
1019                account_history: Some(PruneMode::Distance(2000)),
1020                storage_history: Some(PruneMode::Distance(3000)),
1021                bodies_history: None,
1022                merkle_changesets: PruneMode::Distance(10000),
1023                #[expect(deprecated)]
1024                receipts_log_filter: (),
1025            },
1026        };
1027
1028        config1.merge(config2);
1029
1030        // Check that the configuration has been merged. Any configuration present in config1
1031        // should not be overwritten by config2
1032        assert_eq!(config1.block_interval, 10);
1033        assert_eq!(config1.segments.sender_recovery, Some(PruneMode::Full));
1034        assert_eq!(config1.segments.transaction_lookup, Some(PruneMode::Full));
1035        assert_eq!(config1.segments.receipts, Some(PruneMode::Distance(1000)));
1036        assert_eq!(config1.segments.account_history, Some(PruneMode::Distance(2000)));
1037        assert_eq!(config1.segments.storage_history, Some(PruneMode::Before(5000)));
1038        assert_eq!(config1.segments.merkle_changesets, PruneMode::Distance(10000));
1039    }
1040
1041    #[test]
1042    fn test_conf_trust_nodes_only() {
1043        let trusted_nodes_only = r"#
1044[peers]
1045trusted_nodes_only = true
1046#";
1047        let conf: Config = toml::from_str(trusted_nodes_only).unwrap();
1048        assert!(conf.peers.trusted_nodes_only);
1049
1050        let trusted_nodes_only = r"#
1051[peers]
1052connect_trusted_nodes_only = true
1053#";
1054        let conf: Config = toml::from_str(trusted_nodes_only).unwrap();
1055        assert!(conf.peers.trusted_nodes_only);
1056    }
1057
1058    #[test]
1059    fn test_can_support_dns_in_trusted_nodes() {
1060        let reth_toml = r#"
1061    [peers]
1062    trusted_nodes = [
1063        "enode://0401e494dbd0c84c5c0f72adac5985d2f2525e08b68d448958aae218f5ac8198a80d1498e0ebec2ce38b1b18d6750f6e61a56b4614c5a6c6cf0981c39aed47dc@34.159.32.127:30303",
1064        "enode://e9675164b5e17b9d9edf0cc2bd79e6b6f487200c74d1331c220abb5b8ee80c2eefbf18213989585e9d0960683e819542e11d4eefb5f2b4019e1e49f9fd8fff18@berav2-bootnode.staketab.org:30303"
1065    ]
1066    "#;
1067
1068        let conf: Config = toml::from_str(reth_toml).unwrap();
1069        assert_eq!(conf.peers.trusted_nodes.len(), 2);
1070
1071        let expected_enodes = vec![
1072            "enode://0401e494dbd0c84c5c0f72adac5985d2f2525e08b68d448958aae218f5ac8198a80d1498e0ebec2ce38b1b18d6750f6e61a56b4614c5a6c6cf0981c39aed47dc@34.159.32.127:30303",
1073            "enode://e9675164b5e17b9d9edf0cc2bd79e6b6f487200c74d1331c220abb5b8ee80c2eefbf18213989585e9d0960683e819542e11d4eefb5f2b4019e1e49f9fd8fff18@berav2-bootnode.staketab.org:30303",
1074        ];
1075
1076        for enode in expected_enodes {
1077            let node = TrustedPeer::from_str(enode).unwrap();
1078            assert!(conf.peers.trusted_nodes.contains(&node));
1079        }
1080    }
1081}