//! High level network management.
//!
//! The [`NetworkManager`] contains the state of the network as a whole. It controls how connections
//! are handled and keeps track of connections to peers.
//!
//! ## Capabilities
//!
//! The network manages peers based on the capabilities they announce via their `RLPx` sessions,
//! most importantly the [Ethereum Wire Protocol](https://github.com/ethereum/devp2p/blob/master/caps/eth.md) (`eth`).
//!
//! ## Overview
//!
//! The [`NetworkManager`] is responsible for advancing the state of the `network`. The `network` is
//! made up of peer-to-peer connections between nodes that are available on the same network.
//! Peer discovery is handled by Ethereum's discovery protocols (discv4, discv5). If the address
//! (IP + port) of our node is published via discovery, remote peers can initiate inbound connections
//! to the local node. Once a (TCP) connection is established, both peers authenticate an [RLPx session](https://github.com/ethereum/devp2p/blob/master/rlpx.md) via a handshake. If the handshake is successful, both peers announce their capabilities and are then ready to exchange sub-protocol messages via the `RLPx` session.
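//!
//! ## Usage
//!
//! The manager is itself an endless [`Future`] that must be polled (typically by spawning it as a
//! task) to make progress. A minimal sketch, using the noop provider for brevity:
//!
//! ```no_run
//! # async fn f() {
//! use reth_chainspec::MAINNET;
//! use reth_network::{NetworkConfig, NetworkManager};
//!
//! let config =
//!     NetworkConfig::builder_with_rng_secret_key().build_with_noop_provider(MAINNET.clone());
//! let manager = NetworkManager::eth(config).await.unwrap();
//! // keep a handle for interacting with the network, then drive the manager as a task
//! let handle = manager.handle().clone();
//! tokio::spawn(manager);
//! # }
//! ```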

use crate::{
    budget::{DEFAULT_BUDGET_TRY_DRAIN_NETWORK_HANDLE_CHANNEL, DEFAULT_BUDGET_TRY_DRAIN_SWARM},
    config::NetworkConfig,
    discovery::Discovery,
    error::{NetworkError, ServiceKind},
    eth_requests::IncomingEthRequest,
    import::{BlockImport, BlockImportEvent, BlockImportOutcome, BlockValidation, NewBlockEvent},
    listener::ConnectionListener,
    message::{NewBlockMessage, PeerMessage},
    metrics::{
        BackedOffPeersMetrics, ClosedSessionsMetrics, DirectionalDisconnectMetrics, NetworkMetrics,
        PendingSessionFailureMetrics, NETWORK_POOL_TRANSACTIONS_SCOPE,
    },
    network::{NetworkHandle, NetworkHandleMessage},
    peers::{BackoffReason, PeersManager},
    poll_nested_stream_with_budget,
    protocol::IntoRlpxSubProtocol,
    required_block_filter::RequiredBlockFilter,
    session::SessionManager,
    state::NetworkState,
    swarm::{Swarm, SwarmEvent},
    transactions::NetworkTransactionEvent,
    FetchClient, NetworkBuilder,
};
use futures::{Future, StreamExt};
use parking_lot::Mutex;
use reth_chainspec::EnrForkIdEntry;
use reth_eth_wire::{DisconnectReason, EthNetworkPrimitives, NetworkPrimitives};
use reth_fs_util::{self as fs, FsPathError};
use reth_metrics::common::mpsc::UnboundedMeteredSender;
use reth_network_api::{
    events::{PeerEvent, SessionInfo},
    test_utils::PeersHandle,
    EthProtocolInfo, NetworkEvent, NetworkStatus, PeerInfo, PeerRequest,
};
use reth_network_peers::{NodeRecord, PeerId};
use reth_network_types::ReputationChangeKind;
use reth_storage_api::BlockNumReader;
use reth_tasks::shutdown::GracefulShutdown;
use reth_tokio_util::EventSender;
use secp256k1::SecretKey;
use std::{
    net::SocketAddr,
    path::Path,
    pin::Pin,
    sync::{
        atomic::{AtomicU64, AtomicUsize, Ordering},
        Arc,
    },
    task::{Context, Poll},
    time::{Duration, Instant},
};
use tokio::sync::mpsc::{self, error::TrySendError};
use tokio_stream::wrappers::UnboundedReceiverStream;
use tracing::{debug, error, trace, warn};

#[cfg_attr(doc, aquamarine::aquamarine)]
// TODO: Inlined diagram due to a bug in aquamarine library, should become an include when it's
// fixed. See https://github.com/mersinvald/aquamarine/issues/50
// include_mmd!("docs/mermaid/network-manager.mmd")
/// Manages the _entire_ state of the network.
///
/// This is an endless [`Future`] that consistently drives the state of the entire network forward.
///
/// The [`NetworkManager`] is the container type for all parts involved with advancing the network.
///
/// ```mermaid
/// graph TB
///   handle(NetworkHandle)
///   events(NetworkEvents)
///   transactions(Transactions Task)
///   ethrequest(ETH Request Task)
///   discovery(Discovery Task)
///   subgraph NetworkManager
///     direction LR
///     subgraph Swarm
///         direction TB
///         B1[(Session Manager)]
///         B2[(Connection Listener)]
///         B3[(Network State)]
///     end
///  end
///  handle <--> |request response channel| NetworkManager
///  NetworkManager --> |Network events| events
///  transactions <--> |transactions| NetworkManager
///  ethrequest <--> |ETH request handling| NetworkManager
///  discovery --> |Discovered peers| NetworkManager
/// ```
#[derive(Debug)]
#[must_use = "The NetworkManager does nothing unless polled"]
pub struct NetworkManager<N: NetworkPrimitives = EthNetworkPrimitives> {
    /// The type that manages the actual network part, which includes connections.
    swarm: Swarm<N>,
    /// Underlying network handle that can be shared.
    handle: NetworkHandle<N>,
    /// Receiver half of the command channel set up between this type and the [`NetworkHandle`].
    from_handle_rx: UnboundedReceiverStream<NetworkHandleMessage<N>>,
    /// Handles block imports according to the `eth` protocol.
    block_import: Box<dyn BlockImport<N::NewBlockPayload>>,
    /// Sender for high level network events.
    event_sender: EventSender<NetworkEvent<PeerRequest<N>>>,
    /// Sender half to send events to the
    /// [`TransactionsManager`](crate::transactions::TransactionsManager) task, if configured.
    to_transactions_manager: Option<UnboundedMeteredSender<NetworkTransactionEvent<N>>>,
    /// Sender half to send events to the
    /// [`EthRequestHandler`](crate::eth_requests::EthRequestHandler) task, if configured.
    ///
    /// The channel that originally receives and bundles all requests from all sessions is already
    /// bounded. However, since handling an eth request is more I/O-intensive than delegating it
    /// from the bounded channel to the eth-request channel, requests can build up here if the
    /// node is flooded with them.
    ///
    /// Even though non-malicious requests are relatively cheap, it's possible to craft
    /// body requests with bogus data up to the allowed maximum message size.
    /// Thus, we use a bounded channel here to avoid unbounded build-up if the node is flooded with
    /// requests. The channel size is set at
    /// [`ETH_REQUEST_CHANNEL_CAPACITY`](crate::builder::ETH_REQUEST_CHANNEL_CAPACITY).
    to_eth_request_handler: Option<mpsc::Sender<IncomingEthRequest<N>>>,
    /// Tracks the number of active sessions (connected peers).
    ///
    /// This is updated via internal events and shared via `Arc` with the [`NetworkHandle`]:
    /// updated by the `NetworkWorker` and loaded by the `NetworkService`.
    num_active_peers: Arc<AtomicUsize>,
    /// Metrics for the network.
    metrics: NetworkMetrics,
    /// Disconnect metrics for the network, split by connection direction.
    disconnect_metrics: DirectionalDisconnectMetrics,
    /// Closed sessions metrics, split by direction.
    closed_sessions_metrics: ClosedSessionsMetrics,
    /// Pending session failure metrics, split by direction.
    pending_session_failure_metrics: PendingSessionFailureMetrics,
    /// Backed off peers metrics, split by reason.
    backed_off_peers_metrics: BackedOffPeersMetrics,
}

impl NetworkManager {
    /// Creates the manager of a new network with [`EthNetworkPrimitives`] types.
    ///
    /// ```no_run
    /// # async fn f() {
    /// use reth_chainspec::MAINNET;
    /// use reth_network::{NetworkConfig, NetworkManager};
    /// let config =
    ///     NetworkConfig::builder_with_rng_secret_key().build_with_noop_provider(MAINNET.clone());
    /// let manager = NetworkManager::eth(config).await;
    /// # }
    /// ```
    pub async fn eth<C: BlockNumReader + 'static>(
        config: NetworkConfig<C, EthNetworkPrimitives>,
    ) -> Result<Self, NetworkError> {
        Self::new(config).await
    }
}

impl<N: NetworkPrimitives> NetworkManager<N> {
    /// Sets the dedicated channel for events intended for the
    /// [`TransactionsManager`](crate::transactions::TransactionsManager).
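    ///
    /// A minimal sketch of wiring the channel; the receiving half would be handed to a
    /// `TransactionsManager` task (elided here):
    ///
    /// ```no_run
    /// # async fn f() {
    /// use reth_chainspec::MAINNET;
    /// use reth_network::{NetworkConfig, NetworkManager};
    /// let config =
    ///     NetworkConfig::builder_with_rng_secret_key().build_with_noop_provider(MAINNET.clone());
    /// let manager = NetworkManager::eth(config).await.unwrap();
    /// let (tx, rx) = tokio::sync::mpsc::unbounded_channel();
    /// let manager = manager.with_transactions(tx);
    /// # drop((manager, rx));
    /// # }
    /// ```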
    pub fn with_transactions(
        mut self,
        tx: mpsc::UnboundedSender<NetworkTransactionEvent<N>>,
    ) -> Self {
        self.set_transactions(tx);
        self
    }

    /// Sets the dedicated channel for events intended for the
    /// [`TransactionsManager`](crate::transactions::TransactionsManager).
    pub fn set_transactions(&mut self, tx: mpsc::UnboundedSender<NetworkTransactionEvent<N>>) {
        self.to_transactions_manager =
            Some(UnboundedMeteredSender::new(tx, NETWORK_POOL_TRANSACTIONS_SCOPE));
    }

    /// Sets the dedicated channel for events intended for the
    /// [`EthRequestHandler`](crate::eth_requests::EthRequestHandler).
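    ///
    /// A minimal sketch using a bounded channel; the capacity here is illustrative (reth sizes it
    /// via [`ETH_REQUEST_CHANNEL_CAPACITY`](crate::builder::ETH_REQUEST_CHANNEL_CAPACITY)):
    ///
    /// ```no_run
    /// # async fn f() {
    /// use reth_chainspec::MAINNET;
    /// use reth_network::{NetworkConfig, NetworkManager};
    /// let config =
    ///     NetworkConfig::builder_with_rng_secret_key().build_with_noop_provider(MAINNET.clone());
    /// let manager = NetworkManager::eth(config).await.unwrap();
    /// let (tx, rx) = tokio::sync::mpsc::channel(256);
    /// let manager = manager.with_eth_request_handler(tx);
    /// // `rx` would be handed to an `EthRequestHandler` task.
    /// # drop((manager, rx));
    /// # }
    /// ```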
    pub fn with_eth_request_handler(mut self, tx: mpsc::Sender<IncomingEthRequest<N>>) -> Self {
        self.set_eth_request_handler(tx);
        self
    }

    /// Sets the dedicated channel for events intended for the
    /// [`EthRequestHandler`](crate::eth_requests::EthRequestHandler).
    pub fn set_eth_request_handler(&mut self, tx: mpsc::Sender<IncomingEthRequest<N>>) {
        self.to_eth_request_handler = Some(tx);
    }

    /// Adds an additional protocol handler to the `RLPx` sub-protocol list.
    pub fn add_rlpx_sub_protocol(&mut self, protocol: impl IntoRlpxSubProtocol) {
        self.swarm.add_rlpx_sub_protocol(protocol)
    }

    /// Returns the [`NetworkHandle`] that can be cloned and shared.
    ///
    /// The [`NetworkHandle`] can be used to interact with this [`NetworkManager`].
    pub const fn handle(&self) -> &NetworkHandle<N> {
        &self.handle
    }

    /// Returns the secret key used for authenticating sessions.
    pub const fn secret_key(&self) -> SecretKey {
        self.swarm.sessions().secret_key()
    }

    #[inline]
    fn update_poll_metrics(&self, start: Instant, poll_durations: NetworkManagerPollDurations) {
        let metrics = &self.metrics;

        let NetworkManagerPollDurations { acc_network_handle, acc_swarm } = poll_durations;

        // update metrics for whole poll function
        metrics.duration_poll_network_manager.set(start.elapsed().as_secs_f64());
        // update poll metrics for nested items
        metrics.acc_duration_poll_network_handle.set(acc_network_handle.as_secs_f64());
        metrics.acc_duration_poll_swarm.set(acc_swarm.as_secs_f64());
    }

    /// Creates the manager of a new network.
    ///
    /// The [`NetworkManager`] is an endless future that needs to be polled in order to advance the
    /// state of the entire network.
    pub async fn new<C: BlockNumReader + 'static>(
        config: NetworkConfig<C, N>,
    ) -> Result<Self, NetworkError> {
        let NetworkConfig {
            client,
            secret_key,
            discovery_v4_addr,
            mut discovery_v4_config,
            mut discovery_v5_config,
            listener_addr,
            peers_config,
            sessions_config,
            chain_id,
            block_import,
            network_mode,
            boot_nodes,
            executor,
            hello_message,
            status,
            fork_filter,
            dns_discovery_config,
            extra_protocols,
            tx_gossip_disabled,
            transactions_manager_config: _,
            nat,
            handshake,
            required_block_hashes,
        } = config;

        let peers_manager = PeersManager::new(peers_config);
        let peers_handle = peers_manager.handle();

        let incoming = ConnectionListener::bind(listener_addr).await.map_err(|err| {
            NetworkError::from_io_error(err, ServiceKind::Listener(listener_addr))
        })?;

        // retrieve the tcp address of the socket
        let listener_addr = incoming.local_address();

        // resolve boot nodes
        let resolved_boot_nodes =
            futures::future::try_join_all(boot_nodes.iter().map(|record| record.resolve())).await?;

        if let Some(disc_config) = discovery_v4_config.as_mut() {
            // merge configured boot nodes
            disc_config.bootstrap_nodes.extend(resolved_boot_nodes.clone());
            // add the forkid entry for EIP-868, but wrap it in an `EnrForkIdEntry` for proper
            // encoding
            disc_config.add_eip868_pair("eth", EnrForkIdEntry::from(status.forkid));
        }

        if let Some(discv5) = discovery_v5_config.as_mut() {
            // merge configured boot nodes
            discv5.extend_unsigned_boot_nodes(resolved_boot_nodes)
        }

        let discovery = Discovery::new(
            listener_addr,
            discovery_v4_addr,
            secret_key,
            discovery_v4_config,
            discovery_v5_config,
            dns_discovery_config,
        )
        .await?;
        // need to retrieve the addr here since provided port could be `0`
        let local_peer_id = discovery.local_id();
        let discv4 = discovery.discv4();
        let discv5 = discovery.discv5();

        let num_active_peers = Arc::new(AtomicUsize::new(0));

        let sessions = SessionManager::new(
            secret_key,
            sessions_config,
            executor,
            status,
            hello_message,
            fork_filter,
            extra_protocols,
            handshake,
        );

        let state = NetworkState::new(
            crate::state::BlockNumReader::new(client),
            discovery,
            peers_manager,
            Arc::clone(&num_active_peers),
        );

        let swarm = Swarm::new(incoming, sessions, state);

        let (to_manager_tx, from_handle_rx) = mpsc::unbounded_channel();

        let event_sender: EventSender<NetworkEvent<PeerRequest<N>>> = Default::default();

        let handle = NetworkHandle::new(
            Arc::clone(&num_active_peers),
            Arc::new(Mutex::new(listener_addr)),
            to_manager_tx,
            secret_key,
            local_peer_id,
            peers_handle,
            network_mode,
            Arc::new(AtomicU64::new(chain_id)),
            tx_gossip_disabled,
            discv4,
            discv5,
            event_sender.clone(),
            nat,
        );

        // Spawn required block peer filter if configured
        if !required_block_hashes.is_empty() {
            let filter = RequiredBlockFilter::new(handle.clone(), required_block_hashes);
            filter.spawn();
        }

        Ok(Self {
            swarm,
            handle,
            from_handle_rx: UnboundedReceiverStream::new(from_handle_rx),
            block_import,
            event_sender,
            to_transactions_manager: None,
            to_eth_request_handler: None,
            num_active_peers,
            metrics: Default::default(),
            disconnect_metrics: Default::default(),
            closed_sessions_metrics: Default::default(),
            pending_session_failure_metrics: Default::default(),
            backed_off_peers_metrics: Default::default(),
        })
    }

    /// Create a new [`NetworkManager`] instance and start a [`NetworkBuilder`] to configure all
    /// components of the network.
    ///
    /// ```
    /// use reth_network::{
    ///     config::rng_secret_key, EthNetworkPrimitives, NetworkConfig, NetworkManager,
    /// };
    /// use reth_network_peers::mainnet_nodes;
    /// use reth_storage_api::noop::NoopProvider;
    /// use reth_transaction_pool::TransactionPool;
    /// async fn launch<Pool: TransactionPool>(pool: Pool) {
    ///     // This block provider implementation is used for testing purposes.
    ///     let client = NoopProvider::default();
    ///
    ///     // The key that's used for encrypting sessions and to identify our node.
    ///     let local_key = rng_secret_key();
    ///
    ///     let config = NetworkConfig::<_, EthNetworkPrimitives>::builder(local_key)
    ///         .boot_nodes(mainnet_nodes())
    ///         .build(client.clone());
    ///     let transactions_manager_config = config.transactions_manager_config.clone();
    ///
    ///     // create the network instance
    ///     let (handle, network, transactions, request_handler) = NetworkManager::builder(config)
    ///         .await
    ///         .unwrap()
    ///         .transactions(pool, transactions_manager_config)
    ///         .request_handler(client)
    ///         .split_with_handle();
    /// }
    /// ```
    pub async fn builder<C: BlockNumReader + 'static>(
        config: NetworkConfig<C, N>,
    ) -> Result<NetworkBuilder<(), (), N>, NetworkError> {
        let network = Self::new(config).await?;
        Ok(network.into_builder())
    }

    /// Create a [`NetworkBuilder`] to configure all components of the network.
    pub const fn into_builder(self) -> NetworkBuilder<(), (), N> {
        NetworkBuilder { network: self, transactions: (), request_handler: () }
    }

    /// Returns the [`SocketAddr`] that listens for incoming TCP connections.
    pub const fn local_addr(&self) -> SocketAddr {
        self.swarm.listener().local_address()
    }

    /// How many peers we're currently connected to.
    pub fn num_connected_peers(&self) -> usize {
        self.swarm.state().num_active_peers()
    }

    /// Returns the [`PeerId`] used in the network.
    pub fn peer_id(&self) -> &PeerId {
        self.handle.peer_id()
    }

    /// Returns an iterator over all peers in the peer set.
    pub fn all_peers(&self) -> impl Iterator<Item = NodeRecord> + '_ {
        self.swarm.state().peers().iter_peers()
    }

    /// Returns the number of peers in the peer set.
    pub fn num_known_peers(&self) -> usize {
        self.swarm.state().peers().num_known_peers()
    }

    /// Returns a new [`PeersHandle`] that can be cloned and shared.
    ///
    /// The [`PeersHandle`] can be used to interact with the network's peer set.
    pub fn peers_handle(&self) -> PeersHandle {
        self.swarm.state().peers().handle()
    }

    /// Collect the peers from the [`NetworkManager`] and write them to the given
    /// `persistent_peers_file`.
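    ///
    /// A sketch (the path is illustrative):
    ///
    /// ```ignore
    /// manager.write_peers_to_file(Path::new("known-peers.json"))?;
    /// ```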
    pub fn write_peers_to_file(&self, persistent_peers_file: &Path) -> Result<(), FsPathError> {
        let known_peers = self.all_peers().collect::<Vec<_>>();
        persistent_peers_file.parent().map(fs::create_dir_all).transpose()?;
        reth_fs_util::write_json_file(persistent_peers_file, &known_peers)?;
        Ok(())
    }

    /// Returns a new [`FetchClient`] that can be cloned and shared.
    ///
    /// The [`FetchClient`] is the entrypoint for sending requests to the network.
    pub fn fetch_client(&self) -> FetchClient<N> {
        self.swarm.state().fetch_client()
    }

    /// Returns the current [`NetworkStatus`] for the local node.
    pub fn status(&self) -> NetworkStatus {
        let sessions = self.swarm.sessions();
        let status = sessions.status();
        let hello_message = sessions.hello_message();

        #[expect(deprecated)]
        NetworkStatus {
            client_version: hello_message.client_version,
            protocol_version: hello_message.protocol_version as u64,
            eth_protocol_info: EthProtocolInfo {
                difficulty: None,
                head: status.blockhash,
                network: status.chain.id(),
                genesis: status.genesis,
                config: Default::default(),
            },
            capabilities: hello_message
                .protocols
                .into_iter()
                .map(|protocol| protocol.cap)
                .collect(),
        }
    }

    /// Sends an event to the [`TransactionsManager`](crate::transactions::TransactionsManager) if
    /// configured.
    fn notify_tx_manager(&self, event: NetworkTransactionEvent<N>) {
        if let Some(ref tx) = self.to_transactions_manager {
            let _ = tx.send(event);
        }
    }

    /// Sends an event to the [`EthRequestHandler`](crate::eth_requests::EthRequestHandler) if
    /// configured.
    fn delegate_eth_request(&self, event: IncomingEthRequest<N>) {
        if let Some(ref reqs) = self.to_eth_request_handler {
            let _ = reqs.try_send(event).map_err(|e| {
                if let TrySendError::Full(_) = e {
                    debug!(target:"net", "EthRequestHandler channel is full!");
                    self.metrics.total_dropped_eth_requests_at_full_capacity.increment(1);
                }
            });
        }
    }

    /// Handles an incoming request from the peer.
    fn on_eth_request(&self, peer_id: PeerId, req: PeerRequest<N>) {
        match req {
            PeerRequest::GetBlockHeaders { request, response } => {
                self.delegate_eth_request(IncomingEthRequest::GetBlockHeaders {
                    peer_id,
                    request,
                    response,
                })
            }
            PeerRequest::GetBlockBodies { request, response } => {
                self.delegate_eth_request(IncomingEthRequest::GetBlockBodies {
                    peer_id,
                    request,
                    response,
                })
            }
            PeerRequest::GetNodeData { request, response } => {
                self.delegate_eth_request(IncomingEthRequest::GetNodeData {
                    peer_id,
                    request,
                    response,
                })
            }
            PeerRequest::GetReceipts { request, response } => {
                self.delegate_eth_request(IncomingEthRequest::GetReceipts {
                    peer_id,
                    request,
                    response,
                })
            }
            PeerRequest::GetReceipts69 { request, response } => {
                self.delegate_eth_request(IncomingEthRequest::GetReceipts69 {
                    peer_id,
                    request,
                    response,
                })
            }
            PeerRequest::GetReceipts70 { request, response } => {
                self.delegate_eth_request(IncomingEthRequest::GetReceipts70 {
                    peer_id,
                    request,
                    response,
                })
            }
            PeerRequest::GetPooledTransactions { request, response } => {
                self.notify_tx_manager(NetworkTransactionEvent::GetPooledTransactions {
                    peer_id,
                    request,
                    response,
                });
            }
        }
    }

    /// Invoked after a `NewBlock` message from the peer was validated.
    fn on_block_import_result(&mut self, event: BlockImportEvent<N::NewBlockPayload>) {
        match event {
            BlockImportEvent::Announcement(validation) => match validation {
                BlockValidation::ValidHeader { block } => {
                    self.swarm.state_mut().announce_new_block(block);
                }
                BlockValidation::ValidBlock { block } => {
                    self.swarm.state_mut().announce_new_block_hash(block);
                }
            },
            BlockImportEvent::Outcome(outcome) => {
                let BlockImportOutcome { peer, result } = outcome;
                match result {
                    Ok(validated_block) => match validated_block {
                        BlockValidation::ValidHeader { block } => {
                            self.swarm.state_mut().update_peer_block(
                                &peer,
                                block.hash,
                                block.number(),
                            );
                            self.swarm.state_mut().announce_new_block(block);
                        }
                        BlockValidation::ValidBlock { block } => {
                            self.swarm.state_mut().announce_new_block_hash(block);
                        }
                    },
                    Err(_err) => {
                        self.swarm
                            .state_mut()
                            .peers_mut()
                            .apply_reputation_change(&peer, ReputationChangeKind::BadBlock);
                    }
                }
            }
        }
    }

    /// Enforces [EIP-3675](https://eips.ethereum.org/EIPS/eip-3675#devp2p) consensus rules for the network protocol.
    ///
    /// Depending on the mode of the network:
    ///    - disconnects the peer if in POS
    ///    - executes the closure if in POW
    fn within_pow_or_disconnect<F>(&mut self, peer_id: PeerId, only_pow: F)
    where
        F: FnOnce(&mut Self),
    {
        // reject message in POS
        if self.handle.mode().is_stake() {
            // connections to peers which send invalid messages should be terminated
            self.swarm
                .sessions_mut()
                .disconnect(peer_id, Some(DisconnectReason::SubprotocolSpecific));
        } else {
            only_pow(self);
        }
    }

    /// Handles a received message from the peer's session.
    fn on_peer_message(&mut self, peer_id: PeerId, msg: PeerMessage<N>) {
        match msg {
            PeerMessage::NewBlockHashes(hashes) => {
                self.within_pow_or_disconnect(peer_id, |this| {
                    // update peer's state, to track what blocks this peer has seen
                    this.swarm.state_mut().on_new_block_hashes(peer_id, hashes.0.clone());
                    // start block import process for the hashes
                    this.block_import.on_new_block(peer_id, NewBlockEvent::Hashes(hashes));
                })
            }
            PeerMessage::NewBlock(block) => {
                self.within_pow_or_disconnect(peer_id, move |this| {
                    this.swarm.state_mut().on_new_block(peer_id, block.hash);
                    // start block import process
                    this.block_import.on_new_block(peer_id, NewBlockEvent::Block(block));
                });
            }
            PeerMessage::PooledTransactions(msg) => {
                self.notify_tx_manager(NetworkTransactionEvent::IncomingPooledTransactionHashes {
                    peer_id,
                    msg,
                });
            }
            PeerMessage::EthRequest(req) => {
                self.on_eth_request(peer_id, req);
            }
            PeerMessage::ReceivedTransaction(msg) => {
                self.notify_tx_manager(NetworkTransactionEvent::IncomingTransactions {
                    peer_id,
                    msg,
                });
            }
            PeerMessage::SendTransactions(_) => {
                unreachable!("Not emitted by session")
            }
            PeerMessage::BlockRangeUpdated(_) => {}
            PeerMessage::Other(other) => {
                debug!(target: "net", message_id=%other.id, "Ignoring unsupported message");
            }
        }
    }

    /// Handler for received messages from a handle.
    fn on_handle_message(&mut self, msg: NetworkHandleMessage<N>) {
        match msg {
            NetworkHandleMessage::DiscoveryListener(tx) => {
                self.swarm.state_mut().discovery_mut().add_listener(tx);
            }
            NetworkHandleMessage::AnnounceBlock(block, hash) => {
                if self.handle.mode().is_stake() {
                    // See [EIP-3675](https://eips.ethereum.org/EIPS/eip-3675#devp2p)
                    warn!(target: "net", "Peer performed block propagation, but it is not supported in proof of stake (EIP-3675)");
                    return
                }
                let msg = NewBlockMessage { hash, block: Arc::new(block) };
                self.swarm.state_mut().announce_new_block(msg);
            }
            NetworkHandleMessage::EthRequest { peer_id, request } => {
                self.swarm.sessions_mut().send_message(&peer_id, PeerMessage::EthRequest(request))
            }
            NetworkHandleMessage::SendTransaction { peer_id, msg } => {
                self.swarm.sessions_mut().send_message(&peer_id, PeerMessage::SendTransactions(msg))
            }
            NetworkHandleMessage::SendPooledTransactionHashes { peer_id, msg } => self
                .swarm
                .sessions_mut()
                .send_message(&peer_id, PeerMessage::PooledTransactions(msg)),
            NetworkHandleMessage::AddTrustedPeerId(peer_id) => {
                self.swarm.state_mut().add_trusted_peer_id(peer_id);
            }
            NetworkHandleMessage::AddPeerAddress(peer, kind, addr) => {
                // only add peer if we are not shutting down
                if !self.swarm.is_shutting_down() {
                    self.swarm.state_mut().add_peer_kind(peer, kind, addr);
                }
            }
            NetworkHandleMessage::RemovePeer(peer_id, kind) => {
                self.swarm.state_mut().remove_peer_kind(peer_id, kind);
            }
            NetworkHandleMessage::DisconnectPeer(peer_id, reason) => {
                self.swarm.sessions_mut().disconnect(peer_id, reason);
            }
            NetworkHandleMessage::ConnectPeer(peer_id, kind, addr) => {
                self.swarm.state_mut().add_and_connect(peer_id, kind, addr);
            }
            NetworkHandleMessage::SetNetworkState(net_state) => {
                // Sets the network connection state to either `Active` or `Hibernate`.
                // Hibernation stops the node from filling new outbound connections, which is
                // beneficial for sync stages that do not require a network connection.
                self.swarm.on_network_state_change(net_state);
            }

            NetworkHandleMessage::Shutdown(tx) => {
                self.perform_network_shutdown();
                let _ = tx.send(());
            }
            NetworkHandleMessage::ReputationChange(peer_id, kind) => {
                self.swarm.state_mut().peers_mut().apply_reputation_change(&peer_id, kind);
            }
            NetworkHandleMessage::GetReputationById(peer_id, tx) => {
                let _ = tx.send(self.swarm.state_mut().peers().get_reputation(&peer_id));
            }
            NetworkHandleMessage::FetchClient(tx) => {
                let _ = tx.send(self.fetch_client());
            }
            NetworkHandleMessage::GetStatus(tx) => {
                let _ = tx.send(self.status());
            }
            NetworkHandleMessage::StatusUpdate { head } => {
                if let Some(transition) = self.swarm.sessions_mut().on_status_update(head) {
                    self.swarm.state_mut().update_fork_id(transition.current);
                }
            }
            NetworkHandleMessage::GetPeerInfos(tx) => {
                let _ = tx.send(self.get_peer_infos());
            }
            NetworkHandleMessage::GetPeerInfoById(peer_id, tx) => {
                let _ = tx.send(self.get_peer_info_by_id(peer_id));
            }
            NetworkHandleMessage::GetPeerInfosByIds(peer_ids, tx) => {
                let _ = tx.send(self.get_peer_infos_by_ids(peer_ids));
            }
            NetworkHandleMessage::GetPeerInfosByPeerKind(kind, tx) => {
                let peer_ids = self.swarm.state().peers().peers_by_kind(kind);
                let _ = tx.send(self.get_peer_infos_by_ids(peer_ids));
            }
            NetworkHandleMessage::AddRlpxSubProtocol(proto) => self.add_rlpx_sub_protocol(proto),
            NetworkHandleMessage::GetTransactionsHandle(tx) => {
                if let Some(ref tx_inner) = self.to_transactions_manager {
                    let _ = tx_inner.send(NetworkTransactionEvent::GetTransactionsHandle(tx));
                } else {
                    let _ = tx.send(None);
                }
            }
            NetworkHandleMessage::InternalBlockRangeUpdate(block_range_update) => {
                self.swarm.sessions_mut().update_advertised_block_range(block_range_update);
            }
            NetworkHandleMessage::EthMessage { peer_id, message } => {
                self.swarm.sessions_mut().send_message(&peer_id, message)
            }
        }
    }

    fn on_swarm_event(&mut self, event: SwarmEvent<N>) {
        // handle event
        match event {
            SwarmEvent::ValidMessage { peer_id, message } => self.on_peer_message(peer_id, message),
            SwarmEvent::TcpListenerClosed { remote_addr } => {
                trace!(target: "net", ?remote_addr, "TCP listener closed.");
            }
            SwarmEvent::TcpListenerError(err) => {
                trace!(target: "net", %err, "TCP connection error.");
            }
            SwarmEvent::IncomingTcpConnection { remote_addr, session_id } => {
                trace!(target: "net", ?session_id, ?remote_addr, "Incoming connection");
                self.metrics.total_incoming_connections.increment(1);
                self.metrics
                    .incoming_connections
                    .set(self.swarm.state().peers().num_inbound_connections() as f64);
            }
            SwarmEvent::OutgoingTcpConnection { remote_addr, peer_id } => {
                trace!(target: "net", ?remote_addr, ?peer_id, "Starting outbound connection.");
                self.metrics.total_outgoing_connections.increment(1);
                self.update_pending_connection_metrics()
            }
            SwarmEvent::SessionEstablished {
                peer_id,
                remote_addr,
                client_version,
                capabilities,
                version,
                messages,
                status,
                direction,
            } => {
                let total_active = self.num_active_peers.fetch_add(1, Ordering::Relaxed) + 1;
                self.metrics.connected_peers.set(total_active as f64);
                debug!(
                    target: "net",
                    ?remote_addr,
                    %client_version,
                    ?peer_id,
                    ?total_active,
                    kind=%direction,
                    peer_enode=%NodeRecord::new(remote_addr, peer_id),
                    "Session established"
                );

                if direction.is_incoming() {
                    self.swarm
                        .state_mut()
                        .peers_mut()
                        .on_incoming_session_established(peer_id, remote_addr);
                }

                if direction.is_outgoing() {
                    self.swarm.state_mut().peers_mut().on_active_outgoing_established(peer_id);
                }

                self.update_active_connection_metrics();

                let peer_kind = self
                    .swarm
                    .state()
                    .peers()
                    .peer_by_id(peer_id)
                    .map(|(_, kind)| kind)
                    .unwrap_or_default();
                let session_info = SessionInfo {
                    peer_id,
                    remote_addr,
                    client_version,
                    capabilities,
                    status,
                    version,
                    peer_kind,
                };

                self.event_sender
                    .notify(NetworkEvent::ActivePeerSession { info: session_info, messages });
            }
            SwarmEvent::PeerAdded(peer_id) => {
                trace!(target: "net", ?peer_id, "Peer added");
                self.event_sender.notify(NetworkEvent::Peer(PeerEvent::PeerAdded(peer_id)));
                self.metrics.tracked_peers.set(self.swarm.state().peers().num_known_peers() as f64);
            }
            SwarmEvent::PeerRemoved(peer_id) => {
                trace!(target: "net", ?peer_id, "Peer dropped");
                self.event_sender.notify(NetworkEvent::Peer(PeerEvent::PeerRemoved(peer_id)));
                self.metrics.tracked_peers.set(self.swarm.state().peers().num_known_peers() as f64);
            }
            SwarmEvent::SessionClosed { peer_id, remote_addr, error } => {
                let total_active = self.num_active_peers.fetch_sub(1, Ordering::Relaxed) - 1;
                self.metrics.connected_peers.set(total_active as f64);
                trace!(
                    target: "net",
                    ?remote_addr,
                    ?peer_id,
                    ?total_active,
                    ?error,
                    "Session disconnected"
                );

                // Capture direction before state is reset to Idle
                let is_inbound = self.swarm.state().peers().is_inbound_peer(&peer_id);

                let reason = if let Some(ref err) = error {
                    // If the connection was closed due to an error, we report
                    // the peer
                    self.swarm.state_mut().peers_mut().on_active_session_dropped(
                        &remote_addr,
                        &peer_id,
                        err,
                    );
                    self.backed_off_peers_metrics.increment_for_reason(
                        BackoffReason::from_disconnect(err.as_disconnected()),
                    );
                    err.as_disconnected()
                } else {
                    // Gracefully disconnected
                    self.swarm.state_mut().peers_mut().on_active_session_gracefully_closed(peer_id);
                    self.backed_off_peers_metrics
                        .increment_for_reason(BackoffReason::GracefulClose);
                    None
                };
                self.closed_sessions_metrics.active.increment(1);
                self.update_active_connection_metrics();

                if let Some(reason) = reason {
                    if is_inbound {
                        self.disconnect_metrics.increment_inbound(reason);
                    } else {
                        self.disconnect_metrics.increment_outbound(reason);
                    }
                }
                self.metrics
                    .backed_off_peers
                    .set(self.swarm.state().peers().num_backed_off_peers() as f64);
                self.event_sender
                    .notify(NetworkEvent::Peer(PeerEvent::SessionClosed { peer_id, reason }));
            }
            SwarmEvent::IncomingPendingSessionClosed { remote_addr, error } => {
                trace!(
                    target: "net",
                    ?remote_addr,
                    ?error,
                    "Incoming pending session failed"
                );

                if let Some(ref err) = error {
                    self.swarm
                        .state_mut()
                        .peers_mut()
                        .on_incoming_pending_session_dropped(remote_addr, err);
                    self.pending_session_failure_metrics.inbound.increment(1);
                    if let Some(reason) = err.as_disconnected() {
                        self.disconnect_metrics.increment_inbound(reason);
                    }
                } else {
                    self.swarm
                        .state_mut()
                        .peers_mut()
                        .on_incoming_pending_session_gracefully_closed();
                }
                self.closed_sessions_metrics.incoming_pending.increment(1);
                self.metrics
                    .incoming_connections
                    .set(self.swarm.state().peers().num_inbound_connections() as f64);
            }
            SwarmEvent::OutgoingPendingSessionClosed { remote_addr, peer_id, error } => {
                trace!(
                    target: "net",
                    ?remote_addr,
                    ?peer_id,
                    ?error,
                    "Outgoing pending session failed"
                );

                if let Some(ref err) = error {
                    self.swarm.state_mut().peers_mut().on_outgoing_pending_session_dropped(
                        &remote_addr,
                        &peer_id,
                        err,
                    );
                    self.pending_session_failure_metrics.outbound.increment(1);
                    self.backed_off_peers_metrics.increment_for_reason(
                        BackoffReason::from_disconnect(err.as_disconnected()),
                    );
                    if let Some(reason) = err.as_disconnected() {
                        self.disconnect_metrics.increment_outbound(reason);
                    }
                } else {
                    self.swarm
                        .state_mut()
                        .peers_mut()
                        .on_outgoing_pending_session_gracefully_closed(&peer_id);
                }
                self.closed_sessions_metrics.outgoing_pending.increment(1);
                self.update_pending_connection_metrics();
                self.metrics
                    .backed_off_peers
                    .set(self.swarm.state().peers().num_backed_off_peers() as f64);
            }
            SwarmEvent::OutgoingConnectionError { remote_addr, peer_id, error } => {
                trace!(
                    target: "net",
                    ?remote_addr,
                    ?peer_id,
                    %error,
                    "Outgoing connection error"
                );

                self.swarm.state_mut().peers_mut().on_outgoing_connection_failure(
                    &remote_addr,
                    &peer_id,
                    &error,
                );

                self.backed_off_peers_metrics.increment_for_reason(BackoffReason::ConnectionError);
                self.metrics
                    .backed_off_peers
                    .set(self.swarm.state().peers().num_backed_off_peers() as f64);
                self.update_pending_connection_metrics();
            }
            SwarmEvent::BadMessage { peer_id } => {
                self.swarm
                    .state_mut()
                    .peers_mut()
                    .apply_reputation_change(&peer_id, ReputationChangeKind::BadMessage);
                self.metrics.invalid_messages_received.increment(1);
            }
            SwarmEvent::ProtocolBreach { peer_id } => {
                self.swarm
                    .state_mut()
                    .peers_mut()
                    .apply_reputation_change(&peer_id, ReputationChangeKind::BadProtocol);
            }
        }
    }

    /// Returns [`PeerInfo`] for all connected peers.
    fn get_peer_infos(&self) -> Vec<PeerInfo> {
        self.swarm
            .sessions()
            .active_sessions()
            .iter()
            .filter_map(|(&peer_id, session)| {
                self.swarm
                    .state()
                    .peers()
                    .peer_by_id(peer_id)
                    .map(|(record, kind)| session.peer_info(&record, kind))
            })
            .collect()
    }

    /// Returns [`PeerInfo`] for a given peer.
    ///
    /// Returns `None` if there's no active session to the peer.
    fn get_peer_info_by_id(&self, peer_id: PeerId) -> Option<PeerInfo> {
        self.swarm.sessions().active_sessions().get(&peer_id).and_then(|session| {
            self.swarm
                .state()
                .peers()
                .peer_by_id(peer_id)
                .map(|(record, kind)| session.peer_info(&record, kind))
        })
    }

    /// Returns [`PeerInfo`] for the given peers.
    ///
    /// Peers without an active session are ignored.
    fn get_peer_infos_by_ids(&self, peer_ids: impl IntoIterator<Item = PeerId>) -> Vec<PeerInfo> {
        peer_ids.into_iter().filter_map(|peer_id| self.get_peer_info_by_id(peer_id)).collect()
    }

    /// Updates the metrics for active, established connections.
    #[inline]
    fn update_active_connection_metrics(&self) {
        self.metrics
            .incoming_connections
            .set(self.swarm.state().peers().num_inbound_connections() as f64);
        self.metrics
            .outgoing_connections
            .set(self.swarm.state().peers().num_outbound_connections() as f64);
    }

    /// Updates the metrics for pending connections.
    #[inline]
    fn update_pending_connection_metrics(&self) {
        self.metrics
            .pending_outgoing_connections
            .set(self.swarm.state().peers().num_pending_outbound_connections() as f64);
        self.metrics
            .total_pending_connections
            .set(self.swarm.sessions().num_pending_connections() as f64);
    }

    /// Drives the [`NetworkManager`] future until a [`GracefulShutdown`] signal is received.
    ///
    /// This invokes the given function `shutdown_hook` while holding the graceful shutdown guard.
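    ///
    /// A minimal sketch, assuming a [`GracefulShutdown`] obtained from a task manager:
    ///
    /// ```ignore
    /// let result = manager
    ///     .run_until_graceful_shutdown(shutdown, |manager| {
    ///         // e.g. persist the known peers before exiting
    ///         manager.write_peers_to_file(Path::new("known-peers.json"))
    ///     })
    ///     .await;
    /// ```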
    pub async fn run_until_graceful_shutdown<F, R>(
        mut self,
        shutdown: GracefulShutdown,
        shutdown_hook: F,
    ) -> R
    where
        F: FnOnce(Self) -> R,
    {
        let mut graceful_guard = None;
        tokio::select! {
            _ = &mut self => {},
            guard = shutdown => {
                graceful_guard = Some(guard);
            },
        }

        self.perform_network_shutdown();
        let res = shutdown_hook(self);
        drop(graceful_guard);
        res
    }

    /// Performs a graceful network shutdown by stopping new connections from being accepted while
    /// draining current and pending connections.
    fn perform_network_shutdown(&mut self) {
        // Set connection status to `Shutdown`. Stops node from accepting
        // new incoming connections as well as sending connection requests to newly
        // discovered nodes.
        self.swarm.on_shutdown_requested();
        // Disconnect all active connections
        self.swarm.sessions_mut().disconnect_all(Some(DisconnectReason::ClientQuitting));
        // drop pending connections
        self.swarm.sessions_mut().disconnect_all_pending();
    }
}

impl<N: NetworkPrimitives> Future for NetworkManager<N> {
    type Output = ();

    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        let start = Instant::now();
        let mut poll_durations = NetworkManagerPollDurations::default();

        let this = self.get_mut();

        // poll new block imports (expected to be a noop for POS)
        while let Poll::Ready(outcome) = this.block_import.poll(cx) {
            this.on_block_import_result(outcome);
        }

        // These loops drive the entire state of the network and do a lot of work. Under heavy load
        // (many messages/events), data may arrive faster than it can be processed (incoming
        // messages/requests -> events), and it is possible that more data has already arrived by
        // the time an internal event is processed, which could turn this loop into a busy loop.
        // Without yielding back to the executor, it can starve other tasks waiting on that
        // executor to execute them, or drive underlying resources. To prevent this, we
        // preemptively return control when the `budget` is exhausted. The value itself is chosen
        // somewhat arbitrarily: it is high enough so the swarm can make meaningful progress but
        // low enough that this loop does not starve other tasks for too long. If the budget is
        // exhausted we manually yield back control to the (coop) scheduler. This manual yield
        // point should prevent situations where polling appears to be frozen. See also
        // <https://tokio.rs/blog/2020-04-preemption> and tokio's docs on cooperative scheduling
        // <https://docs.rs/tokio/latest/tokio/task/#cooperative-scheduling>.
        //
        // Testing has shown that this loop naturally reaches the pending state within 1-5
        // iterations in << 100µs in most cases. On average it requires ~50µs, which is inside the
        // range of what's recommended as rule of thumb.
        // <https://ryhl.io/blog/async-what-is-blocking/>

        // process incoming messages from a handle (`TransactionsManager` has one)
        //
        // will only be closed if the channel was deliberately closed since we always have an
        // instance of `NetworkHandle`
        let start_network_handle = Instant::now();
        let maybe_more_handle_messages = poll_nested_stream_with_budget!(
            "net",
            "Network message channel",
            DEFAULT_BUDGET_TRY_DRAIN_NETWORK_HANDLE_CHANNEL,
            this.from_handle_rx.poll_next_unpin(cx),
            |msg| this.on_handle_message(msg),
            error!("Network channel closed");
        );
        poll_durations.acc_network_handle = start_network_handle.elapsed();

        // process incoming messages from the network
        let maybe_more_swarm_events = poll_nested_stream_with_budget!(
            "net",
            "Swarm events stream",
            DEFAULT_BUDGET_TRY_DRAIN_SWARM,
            this.swarm.poll_next_unpin(cx),
            |event| this.on_swarm_event(event),
        );
        poll_durations.acc_swarm =
            start_network_handle.elapsed() - poll_durations.acc_network_handle;

        // if a budget was exhausted, the streams may still hold buffered items; wake the task
        // immediately so it is scheduled again instead of waiting for an external wakeup
        if maybe_more_handle_messages || maybe_more_swarm_events {
            // make sure we're woken up again
            cx.waker().wake_by_ref();
            return Poll::Pending
        }

        this.update_poll_metrics(start, poll_durations);

        Poll::Pending
    }
}

#[derive(Debug, Default)]
struct NetworkManagerPollDurations {
    acc_network_handle: Duration,
    acc_swarm: Duration,
}