1use alloy_primitives::B256;
4use futures::StreamExt;
5use reth_config::Config;
6use reth_consensus::FullConsensus;
7use reth_db_api::{tables, transaction::DbTx};
8use reth_downloaders::{
9 bodies::bodies::BodiesDownloaderBuilder,
10 file_client::{ChunkedFileReader, FileClient, DEFAULT_BYTE_LEN_CHUNK_CHAIN_FILE},
11 headers::reverse_headers::ReverseHeadersDownloaderBuilder,
12};
13use reth_evm::ConfigureEvm;
14use reth_network_p2p::{
15 bodies::downloader::BodyDownloader,
16 headers::downloader::{HeaderDownloader, SyncTarget},
17};
18use reth_node_api::BlockTy;
19use reth_node_events::node::NodeEvent;
20use reth_provider::{
21 providers::ProviderNodeTypes, BlockNumReader, HeaderProvider, ProviderError, ProviderFactory,
22 RocksDBProviderFactory, StageCheckpointReader,
23};
24use reth_prune::PruneModes;
25use reth_stages::{prelude::*, ControlFlow, Pipeline, StageId, StageSet};
26use reth_static_file::StaticFileProducer;
27use reth_storage_api::StorageSettingsCache;
28use std::{path::Path, sync::Arc};
29use tokio::sync::watch;
30use tracing::{debug, error, info, warn};
31
/// Configuration controlling how blocks are imported from a chain file.
#[derive(Debug, Clone, Default)]
pub struct ImportConfig {
    /// Disables the stages that require state (everything in `StageId::STATE_REQUIRED`),
    /// importing headers/bodies only.
    pub no_state: bool,
    /// Byte length of each chunk read from the chain file.
    /// `None` falls back to [`DEFAULT_BYTE_LEN_CHUNK_CHAIN_FILE`].
    pub chunk_len: Option<u64>,
    /// When set, an invalid block aborts the import with an error (the pipeline is run with
    /// fail-fast `run()`); otherwise the import stops gracefully at the last valid block and
    /// records the bad block number in the [`ImportResult`].
    pub fail_on_invalid_block: bool,
}
43
/// Summary of a chain-file import run: decoded vs. imported counts plus
/// information about an invalid block the import may have stopped at.
#[derive(Debug)]
pub struct ImportResult {
    /// Number of blocks decoded from the chain file.
    pub total_decoded_blocks: usize,
    /// Number of transactions decoded from the chain file.
    pub total_decoded_txns: usize,
    /// Number of blocks actually written to storage by the import.
    pub total_imported_blocks: usize,
    /// Number of transactions actually written to storage by the import.
    pub total_imported_txns: usize,
    /// Whether the import stopped early because an invalid block was encountered.
    pub stopped_on_invalid_block: bool,
    /// Number of the invalid block, if one was encountered.
    pub bad_block: Option<u64>,
    /// Number of the last valid block before the invalid one, if any.
    pub last_valid_block: Option<u64>,
}

impl ImportResult {
    /// Returns `true` when every decoded block and every decoded transaction
    /// was imported.
    pub fn is_complete(&self) -> bool {
        let blocks_match = self.total_decoded_blocks == self.total_imported_blocks;
        let txns_match = self.total_decoded_txns == self.total_imported_txns;
        blocks_match && txns_match
    }

    /// Returns `true` when the import either finished completely or stopped
    /// deliberately at an invalid block (which is still considered a
    /// successful outcome).
    pub fn is_successful(&self) -> bool {
        if self.stopped_on_invalid_block {
            return true;
        }
        self.is_complete()
    }
}
78
/// Imports blocks from the chain file at `path` in chunks, running a sync pipeline
/// per chunk, and returns an [`ImportResult`] summarizing decoded vs. imported counts.
///
/// Behavior depends on `import_config`:
/// - `no_state` disables the state-requiring stages in the pipeline.
/// - `chunk_len` overrides the chunk byte length used by [`ChunkedFileReader`].
/// - `fail_on_invalid_block` selects fail-fast `pipeline.run()` (errors propagate) versus
///   `pipeline.run_loop()` with graceful stop on `ControlFlow::Unwind`.
///
/// Ctrl-C during a chunk breaks out of the loop and returns the partial result.
///
/// # Errors
/// Returns an error if the file cannot be read, a provider call fails, a chunk has no tip,
/// or (with `fail_on_invalid_block`) the pipeline fails.
pub async fn import_blocks_from_file<N>(
    path: &Path,
    import_config: ImportConfig,
    provider_factory: ProviderFactory<N>,
    config: &Config,
    executor: impl ConfigureEvm<Primitives = N::Primitives> + 'static,
    consensus: Arc<impl FullConsensus<N::Primitives> + 'static>,
    runtime: reth_tasks::Runtime,
) -> eyre::Result<ImportResult>
where
    N: ProviderNodeTypes,
{
    if import_config.no_state {
        info!(target: "reth::import", "Disabled stages requiring state");
    }

    debug!(target: "reth::import",
        chunk_byte_len=import_config.chunk_len.unwrap_or(DEFAULT_BYTE_LEN_CHUNK_CHAIN_FILE),
        "Chunking chain import"
    );

    info!(target: "reth::import", "Consensus engine initialized");

    let mut reader = ChunkedFileReader::new(path, import_config.chunk_len).await?;

    // Snapshot pre-import table sizes so imported counts can be computed as deltas at the end.
    // Transaction hashes live in rocksdb under storage v2, otherwise in the MDBX table.
    let provider = provider_factory.provider()?;
    let init_blocks = provider.tx_ref().entries::<tables::HeaderNumbers>()?;
    let init_txns = if provider_factory.cached_storage_settings().storage_v2 {
        provider_factory.rocksdb_provider().iter::<tables::TransactionHashNumbers>()?.count()
    } else {
        provider.tx_ref().entries::<tables::TransactionHashNumbers>()?
    };
    // Release the read transaction before the (long-running) import loop.
    drop(provider);

    let mut total_decoded_blocks = 0;
    let mut total_decoded_txns = 0;

    // Current local chain tip; each chunk must attach to this header.
    let mut sealed_header = provider_factory
        .sealed_header(provider_factory.last_block_number()?)?
        .expect("should have genesis");

    let static_file_producer =
        StaticFileProducer::new(provider_factory.clone(), PruneModes::default());

    // Populated only when the graceful (non-fail-fast) path hits an invalid block.
    let mut stopped_on_invalid_block = false;
    let mut bad_block_number: Option<u64> = None;
    let mut last_valid_block_number: Option<u64> = None;

    while let Some(file_client) =
        reader.next_chunk::<BlockTy<N>>(consensus.clone(), Some(sealed_header)).await?
    {
        info!(target: "reth::import",
            "Importing chain file chunk"
        );

        let tip = file_client.tip().ok_or(eyre::eyre!("file client has no tip"))?;
        info!(target: "reth::import", "Chain file chunk read");

        total_decoded_blocks += file_client.headers_len();
        total_decoded_txns += file_client.total_transactions();

        // A fresh pipeline is built per chunk, backed by this chunk's file client.
        let (mut pipeline, events) = build_import_pipeline_impl(
            config,
            provider_factory.clone(),
            &consensus,
            Arc::new(file_client),
            static_file_producer.clone(),
            import_config.no_state,
            executor.clone(),
            runtime.clone(),
        )?;

        pipeline.set_tip(tip);
        debug!(target: "reth::import", ?tip, "Tip manually set");

        let latest_block_number =
            provider_factory.get_stage_checkpoint(StageId::Finish)?.map(|ch| ch.block_number);
        // Forward pipeline events to the node event handler in the background.
        tokio::spawn(reth_node_events::node::handle_events(None, latest_block_number, events));

        info!(target: "reth::import", "Starting sync pipeline");
        if import_config.fail_on_invalid_block {
            // Fail-fast mode: any pipeline error (including an invalid block) propagates.
            tokio::select! {
                res = pipeline.run() => res?,
                _ = tokio::signal::ctrl_c() => {
                    info!(target: "reth::import", "Import interrupted by user");
                    break;
                },
            }
        } else {
            // Graceful mode: inspect the control flow so an invalid block stops the
            // import at the last valid block instead of erroring out.
            let result = tokio::select! {
                res = pipeline.run_loop() => res,
                _ = tokio::signal::ctrl_c() => {
                    info!(target: "reth::import", "Import interrupted by user");
                    break;
                },
            };

            match result {
                Ok(ControlFlow::Unwind { target, bad_block }) => {
                    let bad = bad_block.block.number;
                    warn!(
                        target: "reth::import",
                        bad_block = bad,
                        last_valid_block = target,
                        "Invalid block encountered during import; stopping at last valid block"
                    );
                    stopped_on_invalid_block = true;
                    bad_block_number = Some(bad);
                    last_valid_block_number = Some(target);
                    break;
                }
                Ok(ControlFlow::Continue { block_number }) => {
                    debug!(target: "reth::import", block_number, "Pipeline chunk completed");
                }
                Ok(ControlFlow::NoProgress { block_number }) => {
                    debug!(target: "reth::import", ?block_number, "Pipeline made no progress");
                }
                Err(e) => {
                    return Err(e.into());
                }
            }
        }

        // Refresh the tip so the next chunk attaches to what was just imported.
        sealed_header = provider_factory
            .sealed_header(provider_factory.last_block_number()?)?
            .expect("should have genesis");
    }

    // Re-read table sizes and compute imported counts as deltas from the initial snapshot.
    let provider = provider_factory.provider()?;
    let total_imported_blocks = provider.tx_ref().entries::<tables::HeaderNumbers>()? - init_blocks;
    let current_txns = if provider_factory.cached_storage_settings().storage_v2 {
        provider_factory.rocksdb_provider().iter::<tables::TransactionHashNumbers>()?.count()
    } else {
        provider.tx_ref().entries::<tables::TransactionHashNumbers>()?
    };
    let total_imported_txns = current_txns - init_txns;

    let result = ImportResult {
        total_decoded_blocks,
        total_decoded_txns,
        total_imported_blocks,
        total_imported_txns,
        stopped_on_invalid_block,
        bad_block: bad_block_number,
        last_valid_block: last_valid_block_number,
    };

    if result.stopped_on_invalid_block {
        info!(target: "reth::import",
            total_imported_blocks,
            total_imported_txns,
            bad_block = ?result.bad_block,
            last_valid_block = ?result.last_valid_block,
            "Import stopped at last valid block due to invalid block"
        );
    } else if !result.is_complete() {
        error!(target: "reth::import",
            total_decoded_blocks,
            total_imported_blocks,
            total_decoded_txns,
            total_imported_txns,
            "Chain was partially imported"
        );
    } else {
        info!(target: "reth::import",
            total_imported_blocks,
            total_imported_txns,
            "Chain was fully imported"
        );
    }

    Ok(result)
}
266
/// Builds a sync [`Pipeline`] that sources headers and bodies from the given
/// `file_client` instead of the network, together with a stream of its events.
///
/// The pipeline runs the default stages up to the file client's max block, with
/// `fail_on_unwind` enabled; when `disable_exec` is set, all state-requiring
/// stages (`StageId::STATE_REQUIRED`) are disabled.
///
/// # Errors
/// Returns an error if the file client's blocks do not form a canonical chain,
/// or if the local head header cannot be loaded.
#[expect(clippy::too_many_arguments)]
pub fn build_import_pipeline_impl<N, C, E>(
    config: &Config,
    provider_factory: ProviderFactory<N>,
    consensus: &Arc<C>,
    file_client: Arc<FileClient<BlockTy<N>>>,
    static_file_producer: StaticFileProducer<ProviderFactory<N>>,
    disable_exec: bool,
    evm_config: E,
    runtime: reth_tasks::Runtime,
) -> eyre::Result<(Pipeline<N>, impl futures::Stream<Item = NodeEvent<N::Primitives>> + use<N, C, E>)>
where
    N: ProviderNodeTypes,
    C: FullConsensus<N::Primitives> + 'static,
    E: ConfigureEvm<Primitives = N::Primitives> + 'static,
{
    if !file_client.has_canonical_blocks() {
        eyre::bail!("unable to import non canonical blocks");
    }

    // The header stage needs the current local head to know where to resume from.
    let last_block_number = provider_factory.last_block_number()?;
    let local_head = provider_factory
        .sealed_header(last_block_number)?
        .ok_or_else(|| ProviderError::HeaderNotFound(last_block_number.into()))?;

    // Headers are downloaded (in reverse) from the file client rather than peers.
    let mut header_downloader = ReverseHeadersDownloaderBuilder::new(config.stages.headers)
        .build(file_client.clone(), consensus.clone())
        .into_task_with(&runtime);
    header_downloader.update_local_head(local_head);
    // NOTE(review): `tip()`/`min_block()`/`max_block()` are unwrapped here — presumably
    // guaranteed non-empty by the canonical-blocks check above; confirm against FileClient.
    header_downloader.update_sync_target(SyncTarget::Tip(file_client.tip().unwrap()));

    // Bodies likewise come from the file client, over the file's full block range.
    let mut body_downloader = BodiesDownloaderBuilder::new(config.stages.bodies)
        .build(file_client.clone(), consensus.clone(), provider_factory.clone())
        .into_task_with(&runtime);
    body_downloader
        .set_download_range(file_client.min_block().unwrap()..=file_client.max_block().unwrap())
        .expect("failed to set download range");

    let (tip_tx, tip_rx) = watch::channel(B256::ZERO);

    let max_block = file_client.max_block().unwrap_or(0);

    let pipeline = Pipeline::builder()
        .with_tip_sender(tip_tx)
        // Stop the pipeline once the file's last block has been processed.
        .with_max_block(max_block)
        .with_fail_on_unwind(true)
        .add_stages(
            DefaultStages::new(
                provider_factory.clone(),
                tip_rx,
                consensus.clone(),
                header_downloader,
                body_downloader,
                evm_config,
                config.stages.clone(),
                PruneModes::default(),
                None,
            )
            .builder()
            // Skip execution and other state-dependent stages for no-state imports.
            .disable_all_if(&StageId::STATE_REQUIRED, || disable_exec),
        )
        .build(provider_factory, static_file_producer);

    let events = pipeline.events().map(Into::into);

    Ok((pipeline, events))
}