1use alloy_primitives::B256;
4use futures::StreamExt;
5use reth_config::Config;
6use reth_consensus::FullConsensus;
7use reth_db_api::{tables, transaction::DbTx};
8use reth_downloaders::{
9 bodies::bodies::BodiesDownloaderBuilder,
10 file_client::{ChunkedFileReader, FileClient, DEFAULT_BYTE_LEN_CHUNK_CHAIN_FILE},
11 headers::reverse_headers::ReverseHeadersDownloaderBuilder,
12};
13use reth_evm::ConfigureEvm;
14use reth_network_p2p::{
15 bodies::downloader::BodyDownloader,
16 headers::downloader::{HeaderDownloader, SyncTarget},
17};
18use reth_node_api::BlockTy;
19use reth_node_events::node::NodeEvent;
20use reth_provider::{
21 providers::ProviderNodeTypes, BlockNumReader, HeaderProvider, ProviderError, ProviderFactory,
22 StageCheckpointReader,
23};
24use reth_prune::PruneModes;
25use reth_stages::{prelude::*, Pipeline, StageId, StageSet};
26use reth_static_file::StaticFileProducer;
27use std::{path::Path, sync::Arc};
28use tokio::sync::watch;
29use tracing::{debug, error, info};
30
/// Configuration for importing blocks from an RLP-encoded chain file.
#[derive(Debug, Clone, Default)]
pub struct ImportConfig {
    /// Disables the pipeline stages that require state
    /// (applied via `disable_all_if(StageId::STATE_REQUIRED, ..)`).
    pub no_state: bool,
    /// Byte length of each chunk read from the chain file;
    /// `None` falls back to `DEFAULT_BYTE_LEN_CHUNK_CHAIN_FILE`.
    pub chunk_len: Option<u64>,
}
39
/// Counters describing the outcome of a chain-file import run.
///
/// "Decoded" counts come from the file chunks themselves; "imported" counts are
/// measured as the growth of the `HeaderNumbers` / `TransactionHashNumbers`
/// tables over the run.
#[derive(Debug)]
pub struct ImportResult {
    /// Total number of blocks decoded from the chain file.
    pub total_decoded_blocks: usize,
    /// Total number of transactions decoded from the chain file.
    pub total_decoded_txns: usize,
    /// Number of blocks actually written to the database during this run.
    pub total_imported_blocks: usize,
    /// Number of transactions actually written to the database during this run.
    pub total_imported_txns: usize,
}
52
53impl ImportResult {
54 pub fn is_complete(&self) -> bool {
56 self.total_decoded_blocks == self.total_imported_blocks &&
57 self.total_decoded_txns == self.total_imported_txns
58 }
59}
60
/// Imports blocks from the RLP-encoded chain file at `path` into the database,
/// running the staged-sync pipeline once per file chunk.
///
/// The file is read in chunks sized by `import_config.chunk_len` (default:
/// `DEFAULT_BYTE_LEN_CHUNK_CHAIN_FILE`). For every chunk a fresh import pipeline
/// is built via [`build_import_pipeline_impl`] and run up to the chunk's tip.
/// Pressing ctrl-c interrupts the loop, yielding a partial import.
///
/// Returns an [`ImportResult`] comparing how many blocks/transactions were
/// decoded from the file against how many were actually persisted (measured as
/// the growth of the `HeaderNumbers` and `TransactionHashNumbers` tables).
pub async fn import_blocks_from_file<N>(
    path: &Path,
    import_config: ImportConfig,
    provider_factory: ProviderFactory<N>,
    config: &Config,
    executor: impl ConfigureEvm<Primitives = N::Primitives> + 'static,
    consensus: Arc<
        impl FullConsensus<N::Primitives, Error = reth_consensus::ConsensusError> + 'static,
    >,
) -> eyre::Result<ImportResult>
where
    N: ProviderNodeTypes,
{
    if import_config.no_state {
        info!(target: "reth::import", "Disabled stages requiring state");
    }

    debug!(target: "reth::import",
        chunk_byte_len=import_config.chunk_len.unwrap_or(DEFAULT_BYTE_LEN_CHUNK_CHAIN_FILE),
        "Chunking chain import"
    );

    info!(target: "reth::import", "Consensus engine initialized");

    let mut reader = ChunkedFileReader::new(path, import_config.chunk_len).await?;

    // Snapshot table sizes before the import so we can compute how many
    // blocks/transactions this run actually added.
    let provider = provider_factory.provider()?;
    let init_blocks = provider.tx_ref().entries::<tables::HeaderNumbers>()?;
    let init_txns = provider.tx_ref().entries::<tables::TransactionHashNumbers>()?;
    drop(provider);

    let mut total_decoded_blocks = 0;
    let mut total_decoded_txns = 0;

    // Current local chain head; each chunk must attach to it.
    let mut sealed_header = provider_factory
        .sealed_header(provider_factory.last_block_number()?)?
        .expect("should have genesis");

    let static_file_producer =
        StaticFileProducer::new(provider_factory.clone(), PruneModes::default());

    // Drive one pipeline run per file chunk until the file is exhausted.
    while let Some(file_client) =
        reader.next_chunk::<BlockTy<N>>(consensus.clone(), Some(sealed_header)).await?
    {
        info!(target: "reth::import",
            "Importing chain file chunk"
        );

        let tip = file_client.tip().ok_or(eyre::eyre!("file client has no tip"))?;
        info!(target: "reth::import", "Chain file chunk read");

        total_decoded_blocks += file_client.headers_len();
        total_decoded_txns += file_client.total_transactions();

        let (mut pipeline, events) = build_import_pipeline_impl(
            config,
            provider_factory.clone(),
            &consensus,
            Arc::new(file_client),
            static_file_producer.clone(),
            import_config.no_state,
            executor.clone(),
        )?;

        // The tip is known from the file, so set it manually instead of
        // discovering it from the network.
        pipeline.set_tip(tip);
        debug!(target: "reth::import", ?tip, "Tip manually set");

        let latest_block_number =
            provider_factory.get_stage_checkpoint(StageId::Finish)?.map(|ch| ch.block_number);
        // Forward pipeline events to the node event handler in the background.
        tokio::spawn(reth_node_events::node::handle_events(None, latest_block_number, events));

        info!(target: "reth::import", "Starting sync pipeline");
        // Run the pipeline for this chunk, but let ctrl-c abort the whole import.
        tokio::select! {
            res = pipeline.run() => res?,
            _ = tokio::signal::ctrl_c() => {
                info!(target: "reth::import", "Import interrupted by user");
                break;
            },
        }

        // Re-read the head so the next chunk attaches to what was just imported.
        sealed_header = provider_factory
            .sealed_header(provider_factory.last_block_number()?)?
            .expect("should have genesis");
    }

    // Compare post-import table sizes against the initial snapshot.
    let provider = provider_factory.provider()?;
    let total_imported_blocks = provider.tx_ref().entries::<tables::HeaderNumbers>()? - init_blocks;
    let total_imported_txns =
        provider.tx_ref().entries::<tables::TransactionHashNumbers>()? - init_txns;

    let result = ImportResult {
        total_decoded_blocks,
        total_decoded_txns,
        total_imported_blocks,
        total_imported_txns,
    };

    if !result.is_complete() {
        error!(target: "reth::import",
            total_decoded_blocks,
            total_imported_blocks,
            total_decoded_txns,
            total_imported_txns,
            "Chain was partially imported"
        );
    } else {
        info!(target: "reth::import",
            total_imported_blocks,
            total_imported_txns,
            "Chain was fully imported"
        );
    }

    Ok(result)
}
185
186pub fn build_import_pipeline_impl<N, C, E>(
191 config: &Config,
192 provider_factory: ProviderFactory<N>,
193 consensus: &Arc<C>,
194 file_client: Arc<FileClient<BlockTy<N>>>,
195 static_file_producer: StaticFileProducer<ProviderFactory<N>>,
196 disable_exec: bool,
197 evm_config: E,
198) -> eyre::Result<(Pipeline<N>, impl futures::Stream<Item = NodeEvent<N::Primitives>> + use<N, C, E>)>
199where
200 N: ProviderNodeTypes,
201 C: FullConsensus<N::Primitives, Error = reth_consensus::ConsensusError> + 'static,
202 E: ConfigureEvm<Primitives = N::Primitives> + 'static,
203{
204 if !file_client.has_canonical_blocks() {
205 eyre::bail!("unable to import non canonical blocks");
206 }
207
208 let last_block_number = provider_factory.last_block_number()?;
210 let local_head = provider_factory
211 .sealed_header(last_block_number)?
212 .ok_or_else(|| ProviderError::HeaderNotFound(last_block_number.into()))?;
213
214 let mut header_downloader = ReverseHeadersDownloaderBuilder::new(config.stages.headers)
215 .build(file_client.clone(), consensus.clone())
216 .into_task();
217 header_downloader.update_local_head(local_head);
220 header_downloader.update_sync_target(SyncTarget::Tip(file_client.tip().unwrap()));
221
222 let mut body_downloader = BodiesDownloaderBuilder::new(config.stages.bodies)
223 .build(file_client.clone(), consensus.clone(), provider_factory.clone())
224 .into_task();
225 body_downloader
228 .set_download_range(file_client.min_block().unwrap()..=file_client.max_block().unwrap())
229 .expect("failed to set download range");
230
231 let (tip_tx, tip_rx) = watch::channel(B256::ZERO);
232
233 let max_block = file_client.max_block().unwrap_or(0);
234
235 let pipeline = Pipeline::builder()
236 .with_tip_sender(tip_tx)
237 .with_max_block(max_block)
239 .with_fail_on_unwind(true)
240 .add_stages(
241 DefaultStages::new(
242 provider_factory.clone(),
243 tip_rx,
244 consensus.clone(),
245 header_downloader,
246 body_downloader,
247 evm_config,
248 config.stages.clone(),
249 PruneModes::default(),
250 None,
251 )
252 .builder()
253 .disable_all_if(&StageId::STATE_REQUIRED, || disable_exec),
254 )
255 .build(provider_factory, static_file_producer);
256
257 let events = pipeline.events().map(Into::into);
258
259 Ok((pipeline, events))
260}