reth_node_builder/launch/engine.rs

//! Engine node related functionality.

use crate::{
    common::{Attached, LaunchContextWith, WithConfigs},
    hooks::NodeHooks,
    rpc::{EngineShutdown, EngineValidatorAddOn, EngineValidatorBuilder, RethRpcAddOns, RpcHandle},
    setup::build_networked_pipeline,
    AddOns, AddOnsContext, FullNode, LaunchContext, LaunchNode, NodeAdapter,
    NodeBuilderWithComponents, NodeComponents, NodeComponentsBuilder, NodeHandle, NodeTypesAdapter,
};
use alloy_consensus::BlockHeader;
use futures::{stream_select, FutureExt, StreamExt};
use reth_chainspec::{EthChainSpec, EthereumHardforks};
use reth_engine_service::service::{ChainEvent, EngineService};
use reth_engine_tree::{
    chain::FromOrchestrator,
    engine::{EngineApiRequest, EngineRequestHandler},
    tree::TreeConfig,
};
use reth_engine_util::EngineMessageStreamExt;
use reth_exex::ExExManagerHandle;
use reth_network::{types::BlockRangeUpdate, NetworkSyncUpdater, SyncState};
use reth_network_api::BlockDownloaderProvider;
use reth_node_api::{
    BuiltPayload, ConsensusEngineHandle, FullNodeTypes, NodeTypes, NodeTypesWithDBAdapter,
};
use reth_node_core::{
    dirs::{ChainPath, DataDirPath},
    exit::NodeExitFuture,
    primitives::Head,
};
use reth_node_events::node;
use reth_provider::{
    providers::{BlockchainProvider, NodeTypesForProvider},
    BlockNumReader, StorageSettingsCache,
};
use reth_tasks::TaskExecutor;
use reth_tokio_util::EventSender;
use reth_tracing::tracing::{debug, error, info};
use reth_trie_db::ChangesetCache;
use std::{future::Future, pin::Pin, sync::Arc};
use tokio::sync::{mpsc::unbounded_channel, oneshot};
use tokio_stream::wrappers::UnboundedReceiverStream;

/// The engine node launcher.
#[derive(Debug)]
pub struct EngineNodeLauncher {
    /// The launch context for the node, carrying the task executor and data dir.
    pub ctx: LaunchContext,

    /// Temporary configuration for the engine tree.
    /// Once the engine is stabilized, this should be configured through the node builder.
    pub engine_tree_config: TreeConfig,
}

impl EngineNodeLauncher {
    /// Creates a new instance of the engine node launcher.
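    ///
    /// # Example
    ///
    /// A minimal sketch of constructing the launcher, assuming a [`TaskExecutor`],
    /// a resolved chain data directory, and the default tree config are already
    /// available (the variable names here are illustrative):
    ///
    /// ```rust,ignore
    /// let launcher = EngineNodeLauncher::new(
    ///     task_executor,          // reth_tasks::TaskExecutor
    ///     data_dir,               // ChainPath<DataDirPath> of the chain's data directory
    ///     TreeConfig::default(),  // engine tree tuning knobs
    /// );
    /// // The launcher is then handed to the node builder's launch step, which
    /// // drives it through `LaunchNode::launch_node`.
    /// ```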
    pub const fn new(
        task_executor: TaskExecutor,
        data_dir: ChainPath<DataDirPath>,
        engine_tree_config: TreeConfig,
    ) -> Self {
        Self { ctx: LaunchContext::new(task_executor, data_dir), engine_tree_config }
    }

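    /// Launches the node: opens the database and builds the provider, assembles the
    /// components, spawns any installed ExExes, wires the backfill pipeline and the
    /// engine service together, launches the RPC add-ons, and returns a [`NodeHandle`]
    /// whose exit future resolves when the consensus engine task finishes.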
    async fn launch_node<T, CB, AO>(
        self,
        target: NodeBuilderWithComponents<T, CB, AO>,
    ) -> eyre::Result<NodeHandle<NodeAdapter<T, CB::Components>, AO>>
    where
        T: FullNodeTypes<
            Types: NodeTypesForProvider,
            Provider = BlockchainProvider<
                NodeTypesWithDBAdapter<<T as FullNodeTypes>::Types, <T as FullNodeTypes>::DB>,
            >,
        >,
        CB: NodeComponentsBuilder<T>,
        AO: RethRpcAddOns<NodeAdapter<T, CB::Components>>
            + EngineValidatorAddOn<NodeAdapter<T, CB::Components>>,
    {
        let Self { ctx, engine_tree_config } = self;
        let NodeBuilderWithComponents {
            adapter: NodeTypesAdapter { database },
            components_builder,
            add_ons: AddOns { hooks, exexs: installed_exex, add_ons },
            config,
        } = target;
        let NodeHooks { on_component_initialized, on_node_started, .. } = hooks;

        // Create the changeset cache that will be shared across the engine
        let changeset_cache = ChangesetCache::new();

        // setup the launch context
        let ctx = ctx
            .with_configured_globals(engine_tree_config.reserved_cpu_cores())
            // load the toml config
            .with_loaded_toml_config(config)?
            // add resolved peers
            .with_resolved_peers()?
            // attach the database
            .attach(database.clone())
            // ensure certain settings take effect
            .with_adjusted_configs()
            // Create the provider factory with changeset cache
            .with_provider_factory::<_, <CB::Components as NodeComponents<T>>::Evm>(changeset_cache.clone()).await?
            .inspect(|_| {
                info!(target: "reth::cli", "Database opened");
            })
            .with_prometheus_server().await?
            .inspect(|this| {
                debug!(target: "reth::cli", chain=%this.chain_id(), genesis=?this.genesis_hash(), "Initializing genesis");
            })
            .with_genesis()?
            .inspect(|this: &LaunchContextWith<Attached<WithConfigs<<T::Types as NodeTypes>::ChainSpec>, _>>| {
                info!(target: "reth::cli", "\n{}", this.chain_spec().display_hardforks());
                let settings = this.provider_factory().cached_storage_settings();
                info!(target: "reth::cli", ?settings, "Loaded storage settings");
            })
            .with_metrics_task()
            // Pass `FullNodeTypes` as the type parameter here so that we can build
            // the components later on.
            .with_blockchain_db::<T, _>(move |provider_factory| {
                Ok(BlockchainProvider::new(provider_factory)?)
            })?
            .with_components(components_builder, on_component_initialized).await?;

        // spawn exexs if any
        let maybe_exex_manager_handle = ctx.launch_exex(installed_exex).await?;

        // create pipeline
        let network_handle = ctx.components().network().clone();
        let network_client = network_handle.fetch_client().await?;
        let (consensus_engine_tx, consensus_engine_rx) = unbounded_channel();

        let node_config = ctx.node_config();

        // We always assume that the node is syncing after a restart.
        network_handle.update_sync_state(SyncState::Syncing);

        let max_block = ctx.max_block(network_client.clone()).await?;

        let static_file_producer = ctx.static_file_producer();
        let static_file_producer_events = static_file_producer.lock().events();
        info!(target: "reth::cli", "StaticFileProducer initialized");

        let consensus = Arc::new(ctx.components().consensus().clone());

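        // Build the backfill (staged-sync) pipeline; it uses the network client for
        // downloads and shares the node's provider factory, EVM config, and static
        // file producer.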
        let pipeline = build_networked_pipeline(
            &ctx.toml_config().stages,
            network_client.clone(),
            consensus.clone(),
            ctx.provider_factory().clone(),
            ctx.task_executor(),
            ctx.sync_metrics_tx(),
            ctx.prune_config(),
            max_block,
            static_file_producer,
            ctx.components().evm_config().clone(),
            maybe_exex_manager_handle.clone().unwrap_or_else(ExExManagerHandle::empty),
            ctx.era_import_source(),
        )?;

        // The new engine writes directly to static files. This ensures that they're up to the tip.
        pipeline.move_to_static_files()?;

        let pipeline_events = pipeline.events();

        let mut pruner_builder = ctx.pruner_builder();
        if let Some(exex_manager_handle) = &maybe_exex_manager_handle {
            pruner_builder =
                pruner_builder.finished_exex_height(exex_manager_handle.finished_height());
        }
        let pruner = pruner_builder.build_with_provider_factory(ctx.provider_factory().clone());
        let pruner_events = pruner.events();
        info!(target: "reth::cli", prune_config=?ctx.prune_config(), "Pruner initialized");

        let event_sender = EventSender::default();

        let beacon_engine_handle = ConsensusEngineHandle::new(consensus_engine_tx.clone());

        // extract the jwt secret from the args if possible
        let jwt_secret = ctx.auth_jwt_secret()?;

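        // Context handed to the add-ons (RPC modules and the engine validator): the node
        // adapter, node config, engine handle, JWT secret, and the engine event sender.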
        let add_ons_ctx = AddOnsContext {
            node: ctx.node_adapter().clone(),
            config: ctx.node_config(),
            beacon_engine_handle: beacon_engine_handle.clone(),
            jwt_secret,
            engine_events: event_sender.clone(),
        };
        let validator_builder = add_ons.engine_validator_builder();

        // Build the engine validator with all required components
        let engine_validator = validator_builder
            .clone()
            .build_tree_validator(&add_ons_ctx, engine_tree_config.clone(), changeset_cache.clone())
            .await?;

        // Create the consensus engine stream with optional reorg
        let consensus_engine_stream = UnboundedReceiverStream::from(consensus_engine_rx)
            .maybe_skip_fcu(node_config.debug.skip_fcu)
            .maybe_skip_new_payload(node_config.debug.skip_new_payload)
            .maybe_reorg(
                ctx.blockchain_db().clone(),
                ctx.components().evm_config().clone(),
                || async {
                    // Create a separate cache for the reorg validator (not shared with the main engine)
                    let reorg_cache = ChangesetCache::new();
                    validator_builder
                        .build_tree_validator(&add_ons_ctx, engine_tree_config.clone(), reorg_cache)
                        .await
                },
                node_config.debug.reorg_frequency,
                node_config.debug.reorg_depth,
            )
            .await?
            // Store messages _after_ skipping so that the `replay-engine` command
            // replays only the messages that were actually observed by the engine
            // during this run.
            .maybe_store_messages(node_config.debug.engine_api_store.clone());

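        // Assemble the engine service: it consumes the consensus engine stream above and
        // drives live sync via the engine tree, falling back to the pipeline for backfill.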
        let mut engine_service = EngineService::new(
            consensus.clone(),
            ctx.chain_spec(),
            network_client.clone(),
            Box::pin(consensus_engine_stream),
            pipeline,
            Box::new(ctx.task_executor().clone()),
            ctx.provider_factory().clone(),
            ctx.blockchain_db().clone(),
            pruner,
            ctx.components().payload_builder_handle().clone(),
            engine_validator,
            engine_tree_config,
            ctx.sync_metrics_tx(),
            ctx.components().evm_config().clone(),
            changeset_cache,
        );

        info!(target: "reth::cli", "Consensus engine initialized");

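        // Merge the event streams of all long-running components into a single stream
        // that is rendered by the events task spawned below.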
        #[allow(clippy::needless_continue)]
        let events = stream_select!(
            event_sender.new_listener().map(Into::into),
            pipeline_events.map(Into::into),
            ctx.consensus_layer_events(),
            pruner_events.map(Into::into),
            static_file_producer_events.map(Into::into),
        );

        ctx.task_executor().spawn_critical(
            "events task",
            Box::pin(node::handle_events(
                Some(Box::new(ctx.components().network().clone())),
                Some(ctx.head().number),
                events,
            )),
        );

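        // Launch the add-ons (RPC servers, engine API, etc.) and take their handles; the
        // shutdown handle returned here is discarded and replaced below with one whose
        // receiver is polled by the consensus engine loop.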
        let RpcHandle {
            rpc_server_handles,
            rpc_registry,
            engine_events,
            beacon_engine_handle,
            engine_shutdown: _,
        } = add_ons.launch_add_ons(add_ons_ctx).await?;

        // Create engine shutdown handle
        let (engine_shutdown, shutdown_rx) = EngineShutdown::new();

        // Run consensus engine to completion
        let initial_target = ctx.initial_backfill_target()?;
        let mut built_payloads = ctx
            .components()
            .payload_builder_handle()
            .subscribe()
            .await
            .map_err(|e| eyre::eyre!("Failed to subscribe to payload builder events: {:?}", e))?
            .into_built_payload_stream()
            .fuse();

        let chainspec = ctx.chain_spec();
        let provider = ctx.blockchain_db().clone();
        let (exit, rx) = oneshot::channel();
        let terminate_after_backfill = ctx.terminate_after_initial_backfill();
        let startup_sync_state_idle = ctx.node_config().debug.startup_sync_state_idle;

        info!(target: "reth::cli", "Starting consensus engine");
        let consensus_engine = async move {
            if let Some(initial_target) = initial_target {
                debug!(target: "reth::cli", %initial_target, "start backfill sync");
                // network_handle's sync state is already initialized at Syncing
                engine_service.orchestrator_mut().start_backfill_sync(initial_target);
            } else if startup_sync_state_idle {
                network_handle.update_sync_state(SyncState::Idle);
            }

            let mut res = Ok(());
            let mut shutdown_rx = shutdown_rx.fuse();

            // Advance the chain and await locally built payloads so they can be inserted
            // into the engine API tree handler, preventing re-execution if the same block
            // is later received as a payload from the CL.
            loop {
                tokio::select! {
                    shutdown_req = &mut shutdown_rx => {
                        if let Ok(req) = shutdown_req {
                            debug!(target: "reth::cli", "received engine shutdown request");
                            engine_service.orchestrator_mut().handler_mut().handler_mut().on_event(
                                FromOrchestrator::Terminate { tx: req.done_tx }.into()
                            );
                        }
                    }
                    payload = built_payloads.select_next_some() => {
                        if let Some(executed_block) = payload.executed_block() {
                            debug!(target: "reth::cli", block=?executed_block.recovered_block.num_hash(), "inserting built payload");
                            engine_service.orchestrator_mut().handler_mut().handler_mut().on_event(EngineApiRequest::InsertExecutedBlock(executed_block.into_executed_payload()).into());
                        }
                    }
                    event = engine_service.next() => {
                        let Some(event) = event else { break };
                        debug!(target: "reth::cli", "Event: {event}");
                        match event {
                            ChainEvent::BackfillSyncFinished => {
                                if terminate_after_backfill {
                                    debug!(target: "reth::cli", "Terminating after initial backfill");
                                    break
                                }
                                if startup_sync_state_idle {
                                    network_handle.update_sync_state(SyncState::Idle);
                                }
                            }
                            ChainEvent::BackfillSyncStarted => {
                                network_handle.update_sync_state(SyncState::Syncing);
                            }
                            ChainEvent::FatalError => {
                                error!(target: "reth::cli", "Fatal error in consensus engine");
                                res = Err(eyre::eyre!("Fatal error in consensus engine"));
                                break
                            }
                            ChainEvent::Handler(ev) => {
                                if let Some(head) = ev.canonical_header() {
                                    // Once we're progressing via live sync, the node can be
                                    // considered no longer syncing.
                                    network_handle.update_sync_state(SyncState::Idle);
                                    let head_block = Head {
                                        number: head.number(),
                                        hash: head.hash(),
                                        difficulty: head.difficulty(),
                                        timestamp: head.timestamp(),
                                        total_difficulty: chainspec.final_paris_total_difficulty()
                                            .filter(|_| chainspec.is_paris_active_at_block(head.number()))
                                            .unwrap_or_default(),
                                    };
                                    network_handle.update_status(head_block);

                                    let updated = BlockRangeUpdate {
                                        earliest: provider.earliest_block_number().unwrap_or_default(),
                                        latest: head.number(),
                                        latest_hash: head.hash(),
                                    };
                                    network_handle.update_block_range(updated);
                                }
                                event_sender.notify(ev);
                            }
                        }
                    }
                }
            }

            let _ = exit.send(res);
        };
        ctx.task_executor().spawn_critical("consensus engine", Box::pin(consensus_engine));

        let engine_events_for_ethstats = engine_events.new_listener();

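        // Assemble the full node handle exposed to hooks and callers: the components,
        // provider, config, data dir, and the RPC/engine handles of the add-ons.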
        let full_node = FullNode {
            evm_config: ctx.components().evm_config().clone(),
            pool: ctx.components().pool().clone(),
            network: ctx.components().network().clone(),
            provider: ctx.node_adapter().provider.clone(),
            payload_builder_handle: ctx.components().payload_builder_handle().clone(),
            task_executor: ctx.task_executor().clone(),
            config: ctx.node_config().clone(),
            data_dir: ctx.data_dir().clone(),
            add_ons_handle: RpcHandle {
                rpc_server_handles,
                rpc_registry,
                engine_events,
                beacon_engine_handle,
                engine_shutdown,
            },
        };
        // Notify on node started
        on_node_started.on_event(FullNode::clone(&full_node))?;

        ctx.spawn_ethstats(engine_events_for_ethstats).await?;

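        // The exit future is driven by the result of the consensus engine task spawned
        // above (`rx`), together with the node's `debug.terminate` setting.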
        let handle = NodeHandle {
            node_exit_future: NodeExitFuture::new(
                async { rx.await? },
                full_node.config.debug.terminate,
            ),
            node: full_node,
        };

        Ok(handle)
    }
}

impl<T, CB, AO> LaunchNode<NodeBuilderWithComponents<T, CB, AO>> for EngineNodeLauncher
where
    T: FullNodeTypes<
        Types: NodeTypesForProvider,
        Provider = BlockchainProvider<
            NodeTypesWithDBAdapter<<T as FullNodeTypes>::Types, <T as FullNodeTypes>::DB>,
        >,
    >,
    CB: NodeComponentsBuilder<T> + 'static,
    AO: RethRpcAddOns<NodeAdapter<T, CB::Components>>
        + EngineValidatorAddOn<NodeAdapter<T, CB::Components>>
        + 'static,
{
    type Node = NodeHandle<NodeAdapter<T, CB::Components>, AO>;
    type Future = Pin<Box<dyn Future<Output = eyre::Result<Self::Node>> + Send>>;

    fn launch_node(self, target: NodeBuilderWithComponents<T, CB, AO>) -> Self::Future {
        Box::pin(self.launch_node(target))
    }
}