use crate::common::{AccessRights, CliNodeComponents, CliNodeTypes, Environment, EnvironmentArgs};
use alloy_eips::BlockHashOrNumber;
use alloy_primitives::Sealable;
use clap::Parser;
use reth_chainspec::{EthChainSpec, EthereumHardforks, Hardforks};
use reth_cli::chainspec::ChainSpecParser;
use reth_cli_runner::CliContext;
use reth_cli_util::get_secret_key;
use reth_config::config::{HashingConfig, SenderRecoveryConfig, TransactionLookupConfig};
use reth_db_api::database_metrics::DatabaseMetrics;
use reth_downloaders::{
    bodies::bodies::BodiesDownloaderBuilder,
    headers::reverse_headers::ReverseHeadersDownloaderBuilder,
};
use reth_eth_wire::NetPrimitivesFor;
use reth_exex::ExExManagerHandle;
use reth_network::BlockDownloaderProvider;
use reth_network_p2p::HeadersClient;
use reth_node_core::{
    args::{NetworkArgs, StageEnum},
    version::{
        BUILD_PROFILE_NAME, CARGO_PKG_VERSION, VERGEN_BUILD_TIMESTAMP, VERGEN_CARGO_FEATURES,
        VERGEN_CARGO_TARGET_TRIPLE, VERGEN_GIT_SHA,
    },
};
use reth_node_metrics::{
    chain::ChainSpecInfo,
    hooks::Hooks,
    server::{MetricServer, MetricServerConfig},
    version::VersionInfo,
};
use reth_provider::{
    writer::UnifiedStorageWriter, ChainSpecProvider, DatabaseProviderFactory,
    StageCheckpointReader, StageCheckpointWriter, StaticFileProviderFactory,
};
use reth_stages::{
    stages::{
        AccountHashingStage, BodyStage, ExecutionStage, HeaderStage, IndexAccountHistoryStage,
        IndexStorageHistoryStage, MerkleStage, SenderRecoveryStage, StorageHashingStage,
        TransactionLookupStage,
    },
    ExecInput, ExecOutput, ExecutionStageThresholds, Stage, StageExt, UnwindInput, UnwindOutput,
};
use std::{any::Any, net::SocketAddr, sync::Arc, time::Instant};
use tokio::sync::watch;
use tracing::*;

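/// Runs a single stage in isolation over the given block range.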
#[derive(Debug, Parser)]
pub struct Command<C: ChainSpecParser> {
    #[command(flatten)]
    env: EnvironmentArgs<C>,

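    /// Enable Prometheus metrics.
    ///
    /// The metrics endpoint is served at the given socket address.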
    #[arg(long, value_name = "SOCKET")]
    metrics: Option<SocketAddr>,

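    /// The name of the stage to run.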
    #[arg(value_enum)]
    stage: StageEnum,

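    /// The block height to start the stage at.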
    #[arg(long)]
    from: u64,

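    /// The block height to run the stage up to.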
    #[arg(long, short)]
    to: u64,

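    /// Batch size for stage execution and unwind.
    ///
    /// Defaults to the size of the `from..=to` block range.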
    #[arg(long)]
    batch_size: Option<u64>,

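    /// Skip the unwind phase that normally runs before executing the stage.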
    #[arg(long, short)]
    skip_unwind: bool,

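    /// Commit the changes to the database.
    ///
    /// Without this flag the stage runs without persisting its results.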
    #[arg(long, short)]
    commit: bool,

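    /// Save stage checkpoints while running.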
    #[arg(long)]
    checkpoints: bool,

    #[command(flatten)]
    network: NetworkArgs,
}

impl<C: ChainSpecParser<ChainSpec: EthChainSpec + Hardforks + EthereumHardforks>> Command<C> {
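    /// Executes the `stage` command: unwinds the chosen stage over the block range unless
    /// `--skip-unwind` is set, then executes it up to the target block.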
    pub async fn execute<N, Comp, F, P>(self, ctx: CliContext, components: F) -> eyre::Result<()>
    where
        N: CliNodeTypes<ChainSpec = C::ChainSpec>,
        Comp: CliNodeComponents<N>,
        F: FnOnce(Arc<C::ChainSpec>) -> Comp,
        P: NetPrimitivesFor<N::Primitives>,
    {
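        // Raise the process file descriptor limit; the result is intentionally ignored.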
        let _ = fdlimit::raise_fd_limit();

        let Environment { provider_factory, config, data_dir } =
            self.env.init::<N>(AccessRights::RW)?;

        let mut provider_rw = provider_factory.database_provider_rw()?;
        let components = components(provider_factory.chain_spec());

        if let Some(listen_addr) = self.metrics {
            info!(target: "reth::cli", "Starting metrics endpoint at {}", listen_addr);
            let config = MetricServerConfig::new(
                listen_addr,
                VersionInfo {
                    version: CARGO_PKG_VERSION,
                    build_timestamp: VERGEN_BUILD_TIMESTAMP,
                    cargo_features: VERGEN_CARGO_FEATURES,
                    git_sha: VERGEN_GIT_SHA,
                    target_triple: VERGEN_CARGO_TARGET_TRIPLE,
                    build_profile: BUILD_PROFILE_NAME,
                },
                ChainSpecInfo { name: provider_factory.chain_spec().chain().to_string() },
                ctx.task_executor,
                Hooks::builder()
                    .with_hook({
                        let db = provider_factory.db_ref().clone();
                        move || db.report_metrics()
                    })
                    .with_hook({
                        let sfp = provider_factory.static_file_provider();
                        move || {
                            if let Err(error) = sfp.report_metrics() {
                                error!(%error, "Failed to report metrics from static file provider");
                            }
                        }
                    })
                    .build(),
            );

            MetricServer::new(config).serve().await?;
        }

        let batch_size = self.batch_size.unwrap_or(self.to.saturating_sub(self.from) + 1);

        let etl_config = config.stages.etl.clone();
        let prune_modes = config.prune.clone().map(|prune| prune.segments).unwrap_or_default();

        let (mut exec_stage, mut unwind_stage): (Box<dyn Stage<_>>, Option<Box<dyn Stage<_>>>) =
            match self.stage {
                StageEnum::Headers => {
                    let consensus = Arc::new(components.consensus().clone());

                    let network_secret_path = self
                        .network
                        .p2p_secret_key
                        .clone()
                        .unwrap_or_else(|| data_dir.p2p_secret());
                    let p2p_secret_key = get_secret_key(&network_secret_path)?;

                    let default_peers_path = data_dir.known_peers();

                    let network = self
                        .network
                        .network_config::<P>(
                            &config,
                            provider_factory.chain_spec(),
                            p2p_secret_key,
                            default_peers_path,
                        )
                        .build(provider_factory.clone())
                        .start_network()
                        .await?;
                    let fetch_client = Arc::new(network.fetch_client().await?);
                    let tip: P::BlockHeader = loop {
                        match fetch_client.get_header(BlockHashOrNumber::Number(self.to)).await {
                            Ok(header) => {
                                if let Some(header) = header.into_data() {
                                    break header
                                }
                            }
                            Err(error) if error.is_retryable() => {
                                warn!(target: "reth::cli", "Error requesting header: {error}. Retrying...")
                            }
                            Err(error) => return Err(error.into()),
                        }
                    };
                    let (_, rx) = watch::channel(tip.hash_slow());
                    (
                        Box::new(HeaderStage::new(
                            provider_factory.clone(),
                            ReverseHeadersDownloaderBuilder::new(config.stages.headers)
                                .build(fetch_client, consensus.clone()),
                            rx,
                            consensus,
                            etl_config,
                        )),
                        None,
                    )
                }
                StageEnum::Bodies => {
                    let consensus = Arc::new(components.consensus().clone());

                    let mut config = config;
                    config.peers.trusted_nodes_only = self.network.trusted_only;
                    config.peers.trusted_nodes.extend(self.network.trusted_peers.clone());

                    let network_secret_path = self
                        .network
                        .p2p_secret_key
                        .clone()
                        .unwrap_or_else(|| data_dir.p2p_secret());
                    let p2p_secret_key = get_secret_key(&network_secret_path)?;

                    let default_peers_path = data_dir.known_peers();

                    let network = self
                        .network
                        .network_config::<P>(
                            &config,
                            provider_factory.chain_spec(),
                            p2p_secret_key,
                            default_peers_path,
                        )
                        .build(provider_factory.clone())
                        .start_network()
                        .await?;
                    let fetch_client = Arc::new(network.fetch_client().await?);

                    let stage = BodyStage::new(
                        BodiesDownloaderBuilder::default()
                            .with_stream_batch_size(batch_size as usize)
                            .with_request_limit(config.stages.bodies.downloader_request_limit)
                            .with_max_buffered_blocks_size_bytes(
                                config.stages.bodies.downloader_max_buffered_blocks_size_bytes,
                            )
                            .with_concurrent_requests_range(
                                config.stages.bodies.downloader_min_concurrent_requests..=
                                    config.stages.bodies.downloader_max_concurrent_requests,
                            )
                            .build(fetch_client, consensus.clone(), provider_factory.clone()),
                    );
                    (Box::new(stage), None)
                }
                StageEnum::Senders => (
                    Box::new(SenderRecoveryStage::new(SenderRecoveryConfig {
                        commit_threshold: batch_size,
                    })),
                    None,
                ),
                StageEnum::Execution => (
                    Box::new(ExecutionStage::new(
                        components.executor().clone(),
                        Arc::new(components.consensus().clone()),
                        ExecutionStageThresholds {
                            max_blocks: Some(batch_size),
                            max_changes: None,
                            max_cumulative_gas: None,
                            max_duration: None,
                        },
                        config.stages.merkle.clean_threshold,
                        ExExManagerHandle::empty(),
                    )),
                    None,
                ),
                StageEnum::TxLookup => (
                    Box::new(TransactionLookupStage::new(
                        TransactionLookupConfig { chunk_size: batch_size },
                        etl_config,
                        prune_modes.transaction_lookup,
                    )),
                    None,
                ),
                StageEnum::AccountHashing => (
                    Box::new(AccountHashingStage::new(
                        HashingConfig { clean_threshold: 1, commit_threshold: batch_size },
                        etl_config,
                    )),
                    None,
                ),
                StageEnum::StorageHashing => (
                    Box::new(StorageHashingStage::new(
                        HashingConfig { clean_threshold: 1, commit_threshold: batch_size },
                        etl_config,
                    )),
                    None,
                ),
                StageEnum::Merkle => (
                    Box::new(MerkleStage::new_execution(config.stages.merkle.clean_threshold)),
                    Some(Box::new(MerkleStage::default_unwind())),
                ),
                StageEnum::AccountHistory => (
                    Box::new(IndexAccountHistoryStage::new(
                        config.stages.index_account_history,
                        etl_config,
                        prune_modes.account_history,
                    )),
                    None,
                ),
                StageEnum::StorageHistory => (
                    Box::new(IndexStorageHistoryStage::new(
                        config.stages.index_storage_history,
                        etl_config,
                        prune_modes.storage_history,
                    )),
                    None,
                ),
                _ => return Ok(()),
            };
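        // When a dedicated unwind stage is provided, it must be the same stage type as the
        // execution stage.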
        if let Some(unwind_stage) = &unwind_stage {
            assert_eq!((*exec_stage).type_id(), (**unwind_stage).type_id());
        }

        let checkpoint = provider_rw.get_stage_checkpoint(exec_stage.id())?.unwrap_or_default();

        let unwind_stage = unwind_stage.as_mut().unwrap_or(&mut exec_stage);

        let mut unwind = UnwindInput {
            checkpoint: checkpoint.with_block_number(self.to),
            unwind_to: self.from,
            bad_block: None,
        };

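        // Unwind the stage from `to` back down to `from`, unless unwinding was skipped.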
        if !self.skip_unwind {
            while unwind.checkpoint.block_number > self.from {
                let UnwindOutput { checkpoint } = unwind_stage.unwind(&provider_rw, unwind)?;
                unwind.checkpoint = checkpoint;

                if self.checkpoints {
                    provider_rw.save_stage_checkpoint(unwind_stage.id(), checkpoint)?;
                }

                if self.commit {
                    UnifiedStorageWriter::commit_unwind(provider_rw)?;
                    provider_rw = provider_factory.database_provider_rw()?;
                }
            }
        }

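        // Execute the stage from `from` up to the target block, looping until it reports done.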
        let mut input = ExecInput {
            target: Some(self.to),
            checkpoint: Some(checkpoint.with_block_number(self.from)),
        };

        let start = Instant::now();
        info!(target: "reth::cli", stage = %self.stage, "Executing stage");
        loop {
            exec_stage.execute_ready(input).await?;
            let ExecOutput { checkpoint, done } = exec_stage.execute(&provider_rw, input)?;

            input.checkpoint = Some(checkpoint);

            if self.checkpoints {
                provider_rw.save_stage_checkpoint(exec_stage.id(), checkpoint)?;
            }
            if self.commit {
                UnifiedStorageWriter::commit(provider_rw)?;
                provider_rw = provider_factory.database_provider_rw()?;
            }

            if done {
                break
            }
        }
        info!(target: "reth::cli", stage = %self.stage, time = ?start.elapsed(), "Finished stage");

        Ok(())
    }
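
    /// Returns the chain spec this command was configured with.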
    pub fn chain_spec(&self) -> Option<&Arc<C::ChainSpec>> {
        Some(&self.env.chain)
    }
}