reth_network/session/active.rs

1//! Represents an established session.
2
3use core::sync::atomic::Ordering;
4use std::{
5    collections::VecDeque,
6    future::Future,
7    net::SocketAddr,
8    pin::Pin,
9    sync::{atomic::AtomicU64, Arc},
10    task::{ready, Context, Poll},
11    time::{Duration, Instant},
12};
13
14use crate::{
15    message::{NewBlockMessage, PeerMessage, PeerResponse, PeerResponseResult},
16    session::{
17        conn::EthRlpxConnection,
18        handle::{ActiveSessionMessage, SessionCommand},
19        BlockRangeInfo, EthVersion, SessionId,
20    },
21};
22use alloy_eips::merge::EPOCH_SLOTS;
23use alloy_primitives::Sealable;
24use futures::{stream::Fuse, SinkExt, StreamExt};
25use metrics::Gauge;
26use reth_eth_wire::{
27    errors::{EthHandshakeError, EthStreamError},
28    message::{EthBroadcastMessage, MessageError},
29    Capabilities, DisconnectP2P, DisconnectReason, EthMessage, NetworkPrimitives, NewBlockPayload,
30};
31use reth_eth_wire_types::{message::RequestPair, RawCapabilityMessage};
32use reth_metrics::common::mpsc::MeteredPollSender;
33use reth_network_api::PeerRequest;
34use reth_network_p2p::error::RequestError;
35use reth_network_peers::PeerId;
36use reth_network_types::session::config::INITIAL_REQUEST_TIMEOUT;
37use reth_primitives_traits::Block;
38use rustc_hash::FxHashMap;
39use tokio::{
40    sync::{mpsc::error::TrySendError, oneshot},
41    time::Interval,
42};
43use tokio_stream::wrappers::ReceiverStream;
44use tokio_util::sync::PollSender;
45use tracing::{debug, trace};
46
47/// The recommended interval at which to check if a new range update should be sent to the remote
48/// peer.
49///
50/// Updates are only sent when the block height has advanced by at least one epoch (32 blocks)
51/// since the last update. The interval is set to one epoch duration (32 slots * 12 seconds ≈ 6.4 minutes).
52pub(super) const RANGE_UPDATE_INTERVAL: Duration = Duration::from_secs(EPOCH_SLOTS * 12);
53
54// Constants for timeout updating.
55
56/// Minimum timeout value
57const MINIMUM_TIMEOUT: Duration = Duration::from_secs(2);
58
59/// Maximum timeout value
60const MAXIMUM_TIMEOUT: Duration = INITIAL_REQUEST_TIMEOUT;
61/// How much a new measurement affects the current timeout, as a fraction (0.1 = 10%)
62const SAMPLE_IMPACT: f64 = 0.1;
63/// Number of RTTs a response may take before the request is considered timed out
64const TIMEOUT_SCALING: u32 = 3;
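// Sanity check: `calculate_new_timeout` below clamps the updated timeout into
// `[MINIMUM_TIMEOUT, MAXIMUM_TIMEOUT]`, which requires the minimum to not exceed the maximum
// (otherwise `Duration::clamp` would panic).
const _: () = assert!(MINIMUM_TIMEOUT.as_millis() <= MAXIMUM_TIMEOUT.as_millis());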
65
66/// Restricts the number of queued outgoing messages for larger responses:
67///  - Block Bodies
68///  - Receipts
69///  - Headers
70///  - `PooledTransactions`
71///
72/// With proper soft limits in place (2MB per response) this targets 10MB ((4 + 1) * 2MB) of outgoing response data.
73///
74/// This parameter serves as backpressure for reading additional requests from the remote.
75/// Once we've queued up more responses than this, the session should prioritize message flushing
76/// before reading any more messages from the remote peer, throttling the peer.
77const MAX_QUEUED_OUTGOING_RESPONSES: usize = 4;
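// The throttling happens in the receive loop of the `Future` impl below: reading from the remote
// is paused while more than `MAX_QUEUED_OUTGOING_RESPONSES` requests are still awaiting internal
// responses, or while more than that many responses are already queued in `queued_outgoing`.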
78
79/// The type that advances an established session by listening for incoming messages (from local
80/// node or read from connection) and emitting events back to the
81/// [`SessionManager`](super::SessionManager).
82///
83/// It listens for
84///    - incoming commands from the [`SessionManager`](super::SessionManager)
85///    - incoming _internal_ requests/broadcasts via the request/command channel
86///    - incoming requests/broadcasts _from remote_ via the connection
87///    - responses for handled ETH requests received from the remote peer.
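///
/// An [`ActiveSession`] is itself a [`Future`]: it is typically spawned onto the runtime (e.g.
/// `tokio::spawn(session)`, as in the tests below) and resolves once the session has terminated
/// and its final message has been delivered to the manager.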
88#[expect(dead_code)]
89pub(crate) struct ActiveSession<N: NetworkPrimitives> {
90    /// Keeps track of request ids.
91    pub(crate) next_id: u64,
92    /// The underlying connection.
93    pub(crate) conn: EthRlpxConnection<N>,
94    /// Identifier of the node we're connected to.
95    pub(crate) remote_peer_id: PeerId,
96    /// The address we're connected to.
97    pub(crate) remote_addr: SocketAddr,
98    /// All capabilities the peer announced
99    pub(crate) remote_capabilities: Arc<Capabilities>,
100    /// Internal identifier of this session
101    pub(crate) session_id: SessionId,
102    /// Incoming commands from the manager
103    pub(crate) commands_rx: ReceiverStream<SessionCommand<N>>,
104    /// Sink to send messages to the [`SessionManager`](super::SessionManager).
105    pub(crate) to_session_manager: MeteredPollSender<ActiveSessionMessage<N>>,
106    /// A message that needs to be delivered to the session manager
107    pub(crate) pending_message_to_session: Option<ActiveSessionMessage<N>>,
108    /// Incoming internal requests which are delegated to the remote peer.
109    pub(crate) internal_request_rx: Fuse<ReceiverStream<PeerRequest<N>>>,
110    /// All requests sent to the remote peer for which we're still awaiting a response
111    pub(crate) inflight_requests: FxHashMap<u64, InflightRequest<PeerRequest<N>>>,
112    /// All requests received from the remote peer for which we're awaiting an internal response
113    pub(crate) received_requests_from_remote: Vec<ReceivedRequest<N>>,
114    /// Buffered messages that should be handled and sent to the peer.
115    pub(crate) queued_outgoing: QueuedOutgoingMessages<N>,
116    /// The maximum time we wait for a response from a peer.
117    pub(crate) internal_request_timeout: Arc<AtomicU64>,
118    /// Interval when to check for timed out requests.
119    pub(crate) internal_request_timeout_interval: Interval,
120    /// If an [`ActiveSession`] does not receive a response at all within this duration then it is
121    /// considered a protocol violation and the session will initiate a drop.
122    pub(crate) protocol_breach_request_timeout: Duration,
123    /// Used to reserve a slot to guarantee that the termination message is delivered
124    pub(crate) terminate_message:
125        Option<(PollSender<ActiveSessionMessage<N>>, ActiveSessionMessage<N>)>,
126    /// The eth69 range info for the remote peer.
127    pub(crate) range_info: Option<BlockRangeInfo>,
128    /// The eth69 range info for the local node (this node).
129    /// This represents the range of blocks that this node can serve to other peers.
130    pub(crate) local_range_info: BlockRangeInfo,
131    /// Optional interval for sending periodic range updates to the remote peer (eth69+)
132    /// The interval is set to one epoch duration (~6.4 minutes), but updates are only sent when
133    /// the block height has advanced by at least one epoch (32 blocks) since the last update
134    pub(crate) range_update_interval: Option<Interval>,
135    /// The last latest block number we sent in a range update
136    /// Used to avoid sending unnecessary updates when block height hasn't changed significantly
137    pub(crate) last_sent_latest_block: Option<u64>,
138}
139
140impl<N: NetworkPrimitives> ActiveSession<N> {
141    /// Returns `true` if the session is currently in the process of disconnecting
142    fn is_disconnecting(&self) -> bool {
143        self.conn.inner().is_disconnecting()
144    }
145
146    /// Returns the next request id
147    const fn next_id(&mut self) -> u64 {
148        let id = self.next_id;
149        self.next_id += 1;
150        id
151    }
152
153    /// Shrinks the capacity of the internal buffers.
154    pub fn shrink_to_fit(&mut self) {
155        self.received_requests_from_remote.shrink_to_fit();
156        self.queued_outgoing.shrink_to_fit();
157    }
158
159    /// Returns how many responses we've currently queued up.
160    fn queued_response_count(&self) -> usize {
161        self.queued_outgoing.messages.iter().filter(|m| m.is_response()).count()
162    }
163
164    /// Handle a message read from the connection.
165    ///
166    /// Returns an error if the message is considered to be in violation of the protocol.
167    fn on_incoming_message(&mut self, msg: EthMessage<N>) -> OnIncomingMessageOutcome<N> {
168        /// A macro that handles an incoming request
169        /// This creates a new channel and tries to send the sender half to the session manager
170        /// while storing the receiver half internally so the pending response can be polled.
171        macro_rules! on_request {
172            ($req:ident, $resp_item:ident, $req_item:ident) => {{
173                let RequestPair { request_id, message: request } = $req;
174                let (tx, response) = oneshot::channel();
175                let received = ReceivedRequest {
176                    request_id,
177                    rx: PeerResponse::$resp_item { response },
178                    received: Instant::now(),
179                };
180                self.received_requests_from_remote.push(received);
181                self.try_emit_request(PeerMessage::EthRequest(PeerRequest::$req_item {
182                    request,
183                    response: tx,
184                }))
185                .into()
186            }};
187        }
188
189        /// Processes a response received from the peer
190        macro_rules! on_response {
191            ($resp:ident, $item:ident) => {{
192                let RequestPair { request_id, message } = $resp;
193                if let Some(req) = self.inflight_requests.remove(&request_id) {
194                    match req.request {
195                        RequestState::Waiting(PeerRequest::$item { response, .. }) => {
196                            trace!(peer_id=?self.remote_peer_id, ?request_id, "received response from peer");
197                            let _ = response.send(Ok(message));
198                            self.update_request_timeout(req.timestamp, Instant::now());
199                        }
200                        RequestState::Waiting(request) => {
201                            request.send_bad_response();
202                        }
203                        RequestState::TimedOut => {
204                            // request was already timed out internally
205                            self.update_request_timeout(req.timestamp, Instant::now());
206                        }
207                    }
208                } else {
209                    trace!(peer_id=?self.remote_peer_id, ?request_id, "received response to unknown request");
210                    // we received a response to a request we never sent
211                    self.on_bad_message();
212                }
213
214                OnIncomingMessageOutcome::Ok
215            }};
216        }
217
218        match msg {
219            message @ EthMessage::Status(_) => OnIncomingMessageOutcome::BadMessage {
220                error: EthStreamError::EthHandshakeError(EthHandshakeError::StatusNotInHandshake),
221                message,
222            },
223            EthMessage::NewBlockHashes(msg) => {
224                self.try_emit_broadcast(PeerMessage::NewBlockHashes(msg)).into()
225            }
226            EthMessage::NewBlock(msg) => {
227                let block = NewBlockMessage {
228                    hash: msg.block().header().hash_slow(),
229                    block: Arc::new(*msg),
230                };
231                self.try_emit_broadcast(PeerMessage::NewBlock(block)).into()
232            }
233            EthMessage::Transactions(msg) => {
234                self.try_emit_broadcast(PeerMessage::ReceivedTransaction(msg)).into()
235            }
236            EthMessage::NewPooledTransactionHashes66(msg) => {
237                self.try_emit_broadcast(PeerMessage::PooledTransactions(msg.into())).into()
238            }
239            EthMessage::NewPooledTransactionHashes68(msg) => {
240                self.try_emit_broadcast(PeerMessage::PooledTransactions(msg.into())).into()
241            }
242            EthMessage::GetBlockHeaders(req) => {
243                on_request!(req, BlockHeaders, GetBlockHeaders)
244            }
245            EthMessage::BlockHeaders(resp) => {
246                on_response!(resp, GetBlockHeaders)
247            }
248            EthMessage::GetBlockBodies(req) => {
249                on_request!(req, BlockBodies, GetBlockBodies)
250            }
251            EthMessage::BlockBodies(resp) => {
252                on_response!(resp, GetBlockBodies)
253            }
254            EthMessage::GetPooledTransactions(req) => {
255                on_request!(req, PooledTransactions, GetPooledTransactions)
256            }
257            EthMessage::PooledTransactions(resp) => {
258                on_response!(resp, GetPooledTransactions)
259            }
260            EthMessage::GetNodeData(req) => {
261                on_request!(req, NodeData, GetNodeData)
262            }
263            EthMessage::NodeData(resp) => {
264                on_response!(resp, GetNodeData)
265            }
266            EthMessage::GetReceipts(req) => {
267                if self.conn.version() >= EthVersion::Eth69 {
268                    on_request!(req, Receipts69, GetReceipts69)
269                } else {
270                    on_request!(req, Receipts, GetReceipts)
271                }
272            }
273            EthMessage::GetReceipts70(req) => {
274                on_request!(req, Receipts70, GetReceipts70)
275            }
276            EthMessage::Receipts(resp) => {
277                on_response!(resp, GetReceipts)
278            }
279            EthMessage::Receipts69(resp) => {
280                on_response!(resp, GetReceipts69)
281            }
282            EthMessage::Receipts70(resp) => {
283                on_response!(resp, GetReceipts70)
284            }
285            EthMessage::BlockRangeUpdate(msg) => {
286                // Validate that earliest <= latest according to the spec
287                if msg.earliest > msg.latest {
288                    return OnIncomingMessageOutcome::BadMessage {
289                        error: EthStreamError::InvalidMessage(MessageError::Other(format!(
290                            "invalid block range: earliest ({}) > latest ({})",
291                            msg.earliest, msg.latest
292                        ))),
293                        message: EthMessage::BlockRangeUpdate(msg),
294                    };
295                }
296
297                // Validate that the latest hash is not zero
298                if msg.latest_hash.is_zero() {
299                    return OnIncomingMessageOutcome::BadMessage {
300                        error: EthStreamError::InvalidMessage(MessageError::Other(
301                            "invalid block range: latest_hash cannot be zero".to_string(),
302                        )),
303                        message: EthMessage::BlockRangeUpdate(msg),
304                    };
305                }
306
307                if let Some(range_info) = self.range_info.as_ref() {
308                    range_info.update(msg.earliest, msg.latest, msg.latest_hash);
309                }
310
311                OnIncomingMessageOutcome::Ok
312            }
313            EthMessage::Other(bytes) => self.try_emit_broadcast(PeerMessage::Other(bytes)).into(),
314        }
315    }
316
317    /// Handle an internal peer request that will be sent to the remote.
318    fn on_internal_peer_request(&mut self, request: PeerRequest<N>, deadline: Instant) {
319        let request_id = self.next_id();
320        trace!(?request, peer_id=?self.remote_peer_id, ?request_id, "sending request to peer");
321        let msg = request.create_request_message(request_id).map_versioned(self.conn.version());
322
323        self.queued_outgoing.push_back(msg.into());
324        let req = InflightRequest {
325            request: RequestState::Waiting(request),
326            timestamp: Instant::now(),
327            deadline,
328        };
329        self.inflight_requests.insert(request_id, req);
330    }
331
332    /// Handle a message received from the internal network
333    fn on_internal_peer_message(&mut self, msg: PeerMessage<N>) {
334        match msg {
335            PeerMessage::NewBlockHashes(msg) => {
336                self.queued_outgoing.push_back(EthMessage::NewBlockHashes(msg).into());
337            }
338            PeerMessage::NewBlock(msg) => {
339                self.queued_outgoing.push_back(EthBroadcastMessage::NewBlock(msg.block).into());
340            }
341            PeerMessage::PooledTransactions(msg) => {
342                if msg.is_valid_for_version(self.conn.version()) {
343                    self.queued_outgoing.push_back(EthMessage::from(msg).into());
344                } else {
345                    debug!(target: "net", ?msg,  version=?self.conn.version(), "Message is invalid for connection version, skipping");
346                }
347            }
348            PeerMessage::EthRequest(req) => {
349                let deadline = self.request_deadline();
350                self.on_internal_peer_request(req, deadline);
351            }
352            PeerMessage::SendTransactions(msg) => {
353                self.queued_outgoing.push_back(EthBroadcastMessage::Transactions(msg).into());
354            }
355            PeerMessage::BlockRangeUpdated(_) => {}
356            PeerMessage::ReceivedTransaction(_) => {
357                unreachable!("Not emitted by network")
358            }
359            PeerMessage::Other(other) => {
360                self.queued_outgoing.push_back(OutgoingMessage::Raw(other));
361            }
362        }
363    }
364
365    /// Returns the deadline timestamp at which the request times out
366    fn request_deadline(&self) -> Instant {
367        Instant::now() +
368            Duration::from_millis(self.internal_request_timeout.load(Ordering::Relaxed))
369    }
370
371    /// Handle a Response to the peer
372    ///
373    /// This will queue the response to be sent to the peer
374    fn handle_outgoing_response(&mut self, id: u64, resp: PeerResponseResult<N>) {
375        match resp.try_into_message(id) {
376            Ok(msg) => {
377                self.queued_outgoing.push_back(msg.into());
378            }
379            Err(err) => {
380                debug!(target: "net", %err, "Failed to respond to received request");
381            }
382        }
383    }
384
385    /// Send a message back to the [`SessionManager`](super::SessionManager).
386    ///
387    /// Returns the message if the bounded channel is currently unable to handle this message.
388    #[expect(clippy::result_large_err)]
389    fn try_emit_broadcast(&self, message: PeerMessage<N>) -> Result<(), ActiveSessionMessage<N>> {
390        let Some(sender) = self.to_session_manager.inner().get_ref() else { return Ok(()) };
391
392        match sender
393            .try_send(ActiveSessionMessage::ValidMessage { peer_id: self.remote_peer_id, message })
394        {
395            Ok(_) => Ok(()),
396            Err(err) => {
397                trace!(
398                    target: "net",
399                    %err,
400                    "no capacity for incoming broadcast",
401                );
402                match err {
403                    TrySendError::Full(msg) => Err(msg),
404                    TrySendError::Closed(_) => Ok(()),
405                }
406            }
407        }
408    }
409
410    /// Send a message back to the [`SessionManager`](super::SessionManager)
411    /// covering both broadcasts and incoming requests.
412    ///
413    /// Returns the message if the bounded channel is currently unable to handle this message.
414    #[expect(clippy::result_large_err)]
415    fn try_emit_request(&self, message: PeerMessage<N>) -> Result<(), ActiveSessionMessage<N>> {
416        let Some(sender) = self.to_session_manager.inner().get_ref() else { return Ok(()) };
417
418        match sender
419            .try_send(ActiveSessionMessage::ValidMessage { peer_id: self.remote_peer_id, message })
420        {
421            Ok(_) => Ok(()),
422            Err(err) => {
423                trace!(
424                    target: "net",
425                    %err,
426                    "no capacity for incoming request",
427                );
428                match err {
429                    TrySendError::Full(msg) => Err(msg),
430                    TrySendError::Closed(_) => {
431                        // Note: this would mean the `SessionManager` was dropped, which is already
432                        // handled by checking if the command receiver channel has been closed.
433                        Ok(())
434                    }
435                }
436            }
437        }
438    }
439
440    /// Notify the manager that the peer sent a bad message
441    fn on_bad_message(&self) {
442        let Some(sender) = self.to_session_manager.inner().get_ref() else { return };
443        let _ = sender.try_send(ActiveSessionMessage::BadMessage { peer_id: self.remote_peer_id });
444    }
445
446    /// Report back that this session has been closed.
447    fn emit_disconnect(&mut self, cx: &mut Context<'_>) -> Poll<()> {
448        trace!(target: "net::session", remote_peer_id=?self.remote_peer_id, "emitting disconnect");
449        let msg = ActiveSessionMessage::Disconnected {
450            peer_id: self.remote_peer_id,
451            remote_addr: self.remote_addr,
452        };
453
454        self.terminate_message = Some((self.to_session_manager.inner().clone(), msg));
455        self.poll_terminate_message(cx).expect("message is set")
456    }
457
458    /// Report back that this session has been closed due to an error
459    fn close_on_error(&mut self, error: EthStreamError, cx: &mut Context<'_>) -> Poll<()> {
460        let msg = ActiveSessionMessage::ClosedOnConnectionError {
461            peer_id: self.remote_peer_id,
462            remote_addr: self.remote_addr,
463            error,
464        };
465        self.terminate_message = Some((self.to_session_manager.inner().clone(), msg));
466        self.poll_terminate_message(cx).expect("message is set")
467    }
468
469    /// Starts the disconnect process
470    fn start_disconnect(&mut self, reason: DisconnectReason) -> Result<(), EthStreamError> {
471        Ok(self.conn.inner_mut().start_disconnect(reason)?)
472    }
473
474    /// Flushes the disconnect message and emits the corresponding message
475    fn poll_disconnect(&mut self, cx: &mut Context<'_>) -> Poll<()> {
476        debug_assert!(self.is_disconnecting(), "not disconnecting");
477
478        // try to close the connection and flush out the remaining Disconnect message
479        let _ = ready!(self.conn.poll_close_unpin(cx));
480        self.emit_disconnect(cx)
481    }
482
483    /// Attempts to disconnect by sending the given disconnect reason
484    fn try_disconnect(&mut self, reason: DisconnectReason, cx: &mut Context<'_>) -> Poll<()> {
485        match self.start_disconnect(reason) {
486            Ok(()) => {
487                // we're done
488                self.poll_disconnect(cx)
489            }
490            Err(err) => {
491                debug!(target: "net::session", %err, remote_peer_id=?self.remote_peer_id, "could not send disconnect");
492                self.close_on_error(err, cx)
493            }
494        }
495    }
496
497    /// Checks for _internally_ timed out requests.
498    ///
499    /// If a request misses its deadline, then it is timed out internally.
500    /// If a request also misses the `protocol_breach_request_timeout` then this session is considered
501    /// to be in violation of the protocol and will close.
502    ///
503    /// Returns `true` if a peer missed the `protocol_breach_request_timeout`, in which case the
504    /// session should be terminated.
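    ///
    /// For example, a request whose deadline has passed is first timed out internally (an error is
    /// sent on its internal response channel); if it is still unanswered once
    /// `protocol_breach_request_timeout` has elapsed since it was sent, this method returns `true`.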
505    #[must_use]
506    fn check_timed_out_requests(&mut self, now: Instant) -> bool {
507        for (id, req) in &mut self.inflight_requests {
508            if req.is_timed_out(now) {
509                if req.is_waiting() {
510                    debug!(target: "net::session", ?id, remote_peer_id=?self.remote_peer_id, "timed out outgoing request");
511                    req.timeout();
512                } else if now - req.timestamp > self.protocol_breach_request_timeout {
513                    return true
514                }
515            }
516        }
517
518        false
519    }
520
521    /// Updates the request timeout with a request's timestamps
522    fn update_request_timeout(&mut self, sent: Instant, received: Instant) {
523        let elapsed = received.saturating_duration_since(sent);
524
525        let current = Duration::from_millis(self.internal_request_timeout.load(Ordering::Relaxed));
526        let request_timeout = calculate_new_timeout(current, elapsed);
527        self.internal_request_timeout.store(request_timeout.as_millis() as u64, Ordering::Relaxed);
528        self.internal_request_timeout_interval = tokio::time::interval(request_timeout);
529    }
530
531    /// If a termination message is queued this will try to send it
532    fn poll_terminate_message(&mut self, cx: &mut Context<'_>) -> Option<Poll<()>> {
533        let (mut tx, msg) = self.terminate_message.take()?;
534        match tx.poll_reserve(cx) {
535            Poll::Pending => {
536                self.terminate_message = Some((tx, msg));
537                return Some(Poll::Pending)
538            }
539            Poll::Ready(Ok(())) => {
540                let _ = tx.send_item(msg);
541            }
542            Poll::Ready(Err(_)) => {
543                // channel closed
544            }
545        }
546        // terminate the task
547        Some(Poll::Ready(()))
548    }
549}
550
551impl<N: NetworkPrimitives> Future for ActiveSession<N> {
552    type Output = ();
553
554    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
555        let this = self.get_mut();
556
557        // if the session is terminating we have to send the termination message before we can close
558        if let Some(terminate) = this.poll_terminate_message(cx) {
559            return terminate
560        }
561
562        if this.is_disconnecting() {
563            return this.poll_disconnect(cx)
564        }
565
566        // The receive loop can be CPU intensive since it involves message decoding, which could take
567        // up a lot of resources and increase latencies for other sessions if we don't yield manually.
568        // If the budget is exhausted we manually yield control back to the (coop) scheduler. This manual
569        // yield point should prevent situations where polling appears to be frozen. See also <https://tokio.rs/blog/2020-04-preemption>
570        // and tokio's docs on cooperative scheduling <https://docs.rs/tokio/latest/tokio/task/#cooperative-scheduling>
571        let mut budget = 4;
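        // this bounds how many iterations of the receive loop below may run per `poll` call before
        // we yield back to the scheduler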
572
573        // The main poll loop that drives the session
574        'main: loop {
575            let mut progress = false;
576
577            // we prioritize incoming commands sent from the session manager
578            loop {
579                match this.commands_rx.poll_next_unpin(cx) {
580                    Poll::Pending => break,
581                    Poll::Ready(None) => {
582                        // this is only possible when the manager was dropped, in which case we also
583                        // terminate this session
584                        return Poll::Ready(())
585                    }
586                    Poll::Ready(Some(cmd)) => {
587                        progress = true;
588                        match cmd {
589                            SessionCommand::Disconnect { reason } => {
590                                debug!(
591                                    target: "net::session",
592                                    ?reason,
593                                    remote_peer_id=?this.remote_peer_id,
594                                    "Received disconnect command for session"
595                                );
596                                let reason =
597                                    reason.unwrap_or(DisconnectReason::DisconnectRequested);
598
599                                return this.try_disconnect(reason, cx)
600                            }
601                            SessionCommand::Message(msg) => {
602                                this.on_internal_peer_message(msg);
603                            }
604                        }
605                    }
606                }
607            }
608
609            let deadline = this.request_deadline();
610
611            while let Poll::Ready(Some(req)) = this.internal_request_rx.poll_next_unpin(cx) {
612                progress = true;
613                this.on_internal_peer_request(req, deadline);
614            }
615
616            // Advance all active requests.
617            // We remove each request one by one and push it back if it is still pending.
618            for idx in (0..this.received_requests_from_remote.len()).rev() {
619                let mut req = this.received_requests_from_remote.swap_remove(idx);
620                match req.rx.poll(cx) {
621                    Poll::Pending => {
622                        // not ready yet
623                        this.received_requests_from_remote.push(req);
624                    }
625                    Poll::Ready(resp) => {
626                        this.handle_outgoing_response(req.request_id, resp);
627                    }
628                }
629            }
630
631            // Send messages by advancing the sink and queueing buffered messages into it
632            while this.conn.poll_ready_unpin(cx).is_ready() {
633                if let Some(msg) = this.queued_outgoing.pop_front() {
634                    progress = true;
635                    let res = match msg {
636                        OutgoingMessage::Eth(msg) => this.conn.start_send_unpin(msg),
637                        OutgoingMessage::Broadcast(msg) => this.conn.start_send_broadcast(msg),
638                        OutgoingMessage::Raw(msg) => this.conn.start_send_raw(msg),
639                    };
640                    if let Err(err) = res {
641                        debug!(target: "net::session", %err, remote_peer_id=?this.remote_peer_id, "failed to send message");
642                        // notify the manager
643                        return this.close_on_error(err, cx)
644                    }
645                } else {
646                    // no more messages to send over the wire
647                    break
648                }
649            }
650
651            // read incoming messages from the wire
652            'receive: loop {
653                // ensure we still have enough budget for another iteration
654                budget -= 1;
655                if budget == 0 {
656                    // make sure we're woken up again
657                    cx.waker().wake_by_ref();
658                    break 'main
659                }
660
661                // try to resend the pending message that we could not send because the channel was
662                // full. [`PollSender`] will ensure that we're woken up again when the channel is
663                // ready to receive the message, and will only error if the channel is closed.
664                if let Some(msg) = this.pending_message_to_session.take() {
665                    match this.to_session_manager.poll_reserve(cx) {
666                        Poll::Ready(Ok(_)) => {
667                            let _ = this.to_session_manager.send_item(msg);
668                        }
669                        Poll::Ready(Err(_)) => return Poll::Ready(()),
670                        Poll::Pending => {
671                            this.pending_message_to_session = Some(msg);
672                            break 'receive
673                        }
674                    };
675                }
676
677                // check whether we should throttle incoming messages
678                if this.received_requests_from_remote.len() > MAX_QUEUED_OUTGOING_RESPONSES {
679                    // we're currently waiting for the responses to the peer's requests which aren't
680                    // queued as outgoing yet
681                    //
682                    // Note: we don't need to register the waker here because we polled the requests
683                    // above
684                    break 'receive
685                }
686
687                // we also need to check if we have multiple responses queued up
688                if this.queued_outgoing.messages.len() > MAX_QUEUED_OUTGOING_RESPONSES &&
689                    this.queued_response_count() > MAX_QUEUED_OUTGOING_RESPONSES
690                {
691                    // if we've queued up more responses than allowed, we don't poll for new
692                    // messages and break the receive loop early
693                    //
694                    // Note: we don't need to register the waker here because we still have
695                    // queued messages and the sink impl registered the waker because we've
696                    // already advanced it to `Pending` earlier
697                    break 'receive
698                }
699
700                match this.conn.poll_next_unpin(cx) {
701                    Poll::Pending => break,
702                    Poll::Ready(None) => {
703                        if this.is_disconnecting() {
704                            break
705                        }
706                        debug!(target: "net::session", remote_peer_id=?this.remote_peer_id, "eth stream completed");
707                        return this.emit_disconnect(cx)
708                    }
709                    Poll::Ready(Some(res)) => {
710                        match res {
711                            Ok(msg) => {
712                                trace!(target: "net::session", msg_id=?msg.message_id(), remote_peer_id=?this.remote_peer_id, "received eth message");
713                                // decode and handle message
714                                match this.on_incoming_message(msg) {
715                                    OnIncomingMessageOutcome::Ok => {
716                                        // handled successfully
717                                        progress = true;
718                                    }
719                                    OnIncomingMessageOutcome::BadMessage { error, message } => {
720                                        debug!(target: "net::session", %error, msg=?message, remote_peer_id=?this.remote_peer_id, "received invalid protocol message");
721                                        return this.close_on_error(error, cx)
722                                    }
723                                    OnIncomingMessageOutcome::NoCapacity(msg) => {
724                                        // failed to send due to lack of capacity
725                                        this.pending_message_to_session = Some(msg);
726                                    }
727                                }
728                            }
729                            Err(err) => {
730                                debug!(target: "net::session", %err, remote_peer_id=?this.remote_peer_id, "failed to receive message");
731                                return this.close_on_error(err, cx)
732                            }
733                        }
734                    }
735                }
736            }
737
738            if !progress {
739                break 'main
740            }
741        }
742
743        if let Some(interval) = &mut this.range_update_interval {
744            // Check if we should send a range update based on block height changes
745            while interval.poll_tick(cx).is_ready() {
746                let current_latest = this.local_range_info.latest();
747                let should_send = if let Some(last_sent) = this.last_sent_latest_block {
748                    // Only send if block height has advanced by at least one epoch (32 blocks)
749                    current_latest.saturating_sub(last_sent) >= EPOCH_SLOTS
750                } else {
751                    true // First update, always send
752                };
753
754                if should_send {
755                    this.queued_outgoing.push_back(
756                        EthMessage::BlockRangeUpdate(this.local_range_info.to_message()).into(),
757                    );
758                    this.last_sent_latest_block = Some(current_latest);
759                }
760            }
761        }
762
763        while this.internal_request_timeout_interval.poll_tick(cx).is_ready() {
764            // check for timed out requests
765            if this.check_timed_out_requests(Instant::now()) &&
766                let Poll::Ready(Ok(_)) = this.to_session_manager.poll_reserve(cx)
767            {
768                let msg = ActiveSessionMessage::ProtocolBreach { peer_id: this.remote_peer_id };
769                this.pending_message_to_session = Some(msg);
770            }
771        }
772
773        this.shrink_to_fit();
774
775        Poll::Pending
776    }
777}
778
779/// Tracks a request received from the peer
780pub(crate) struct ReceivedRequest<N: NetworkPrimitives> {
781    /// Protocol Identifier
782    request_id: u64,
783    /// Receiver half of the channel that's supposed to receive the proper response.
784    rx: PeerResponse<N>,
785    /// Timestamp when we read this msg from the wire.
786    #[expect(dead_code)]
787    received: Instant,
788}
789
790/// A request that waits for a response from the peer
791pub(crate) struct InflightRequest<R> {
792    /// Request we sent to peer and the internal response channel
793    request: RequestState<R>,
794    /// Instant when the request was sent
795    timestamp: Instant,
796    /// Time limit for the response
797    deadline: Instant,
798}
799
800// === impl InflightRequest ===
801
802impl<N: NetworkPrimitives> InflightRequest<PeerRequest<N>> {
803    /// Returns true if the request is timed out
804    #[inline]
805    fn is_timed_out(&self, now: Instant) -> bool {
806        now > self.deadline
807    }
808
809    /// Returns true if we're still waiting for a response
810    #[inline]
811    const fn is_waiting(&self) -> bool {
812        matches!(self.request, RequestState::Waiting(_))
813    }
814
815    /// This will time out the request by sending an error response to the internal channel
816    fn timeout(&mut self) {
817        let mut req = RequestState::TimedOut;
818        std::mem::swap(&mut self.request, &mut req);
819
820        if let RequestState::Waiting(req) = req {
821            req.send_err_response(RequestError::Timeout);
822        }
823    }
824}
825
826/// All outcome variants when handling an incoming message
827enum OnIncomingMessageOutcome<N: NetworkPrimitives> {
828    /// Message successfully handled.
829    Ok,
830    /// Message is considered to be in violation of the protocol
831    BadMessage { error: EthStreamError, message: EthMessage<N> },
832    /// Currently no capacity to handle the message
833    NoCapacity(ActiveSessionMessage<N>),
834}
835
836impl<N: NetworkPrimitives> From<Result<(), ActiveSessionMessage<N>>>
837    for OnIncomingMessageOutcome<N>
838{
839    fn from(res: Result<(), ActiveSessionMessage<N>>) -> Self {
840        match res {
841            Ok(_) => Self::Ok,
842            Err(msg) => Self::NoCapacity(msg),
843        }
844    }
845}
846
847enum RequestState<R> {
848    /// Waiting for the response
849    Waiting(R),
850    /// Request already timed out
851    TimedOut,
852}
853
854/// Outgoing messages that can be sent over the wire.
855#[derive(Debug)]
856pub(crate) enum OutgoingMessage<N: NetworkPrimitives> {
857    /// A message that is owned.
858    Eth(EthMessage<N>),
859    /// A message that may be shared by multiple sessions.
860    Broadcast(EthBroadcastMessage<N>),
861    /// A raw capability message
862    Raw(RawCapabilityMessage),
863}
864
865impl<N: NetworkPrimitives> OutgoingMessage<N> {
866    /// Returns true if this is a response.
867    const fn is_response(&self) -> bool {
868        match self {
869            Self::Eth(msg) => msg.is_response(),
870            _ => false,
871        }
872    }
873}
874
875impl<N: NetworkPrimitives> From<EthMessage<N>> for OutgoingMessage<N> {
876    fn from(value: EthMessage<N>) -> Self {
877        Self::Eth(value)
878    }
879}
880
881impl<N: NetworkPrimitives> From<EthBroadcastMessage<N>> for OutgoingMessage<N> {
882    fn from(value: EthBroadcastMessage<N>) -> Self {
883        Self::Broadcast(value)
884    }
885}
886
887/// Calculates a new timeout using an updated estimation of the RTT
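///
/// For example, with `SAMPLE_IMPACT = 0.1` and `TIMEOUT_SCALING = 3`, a measured RTT contributes
/// `0.3 * rtt` and the previous timeout contributes `0.9 * current_timeout`, so a steady RTT of
/// `current_timeout / 3` leaves the timeout unchanged (see `timeout_calculation_sanity_tests`).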
888#[inline]
889fn calculate_new_timeout(current_timeout: Duration, estimated_rtt: Duration) -> Duration {
890    let new_timeout = estimated_rtt.mul_f64(SAMPLE_IMPACT) * TIMEOUT_SCALING;
891
892    // this dampens sudden changes by taking a weighted mean of the old and new values
893    let smoothened_timeout = current_timeout.mul_f64(1.0 - SAMPLE_IMPACT) + new_timeout;
894
895    smoothened_timeout.clamp(MINIMUM_TIMEOUT, MAXIMUM_TIMEOUT)
896}
897
898/// A helper struct that wraps the queue of outgoing messages and a metric to track their count
899pub(crate) struct QueuedOutgoingMessages<N: NetworkPrimitives> {
900    messages: VecDeque<OutgoingMessage<N>>,
901    count: Gauge,
902}
903
904impl<N: NetworkPrimitives> QueuedOutgoingMessages<N> {
905    pub(crate) const fn new(metric: Gauge) -> Self {
906        Self { messages: VecDeque::new(), count: metric }
907    }
908
909    pub(crate) fn push_back(&mut self, message: OutgoingMessage<N>) {
910        self.messages.push_back(message);
911        self.count.increment(1);
912    }
913
914    pub(crate) fn pop_front(&mut self) -> Option<OutgoingMessage<N>> {
915        self.messages.pop_front().inspect(|_| self.count.decrement(1))
916    }
917
918    pub(crate) fn shrink_to_fit(&mut self) {
919        self.messages.shrink_to_fit();
920    }
921}
922
923impl<N: NetworkPrimitives> Drop for QueuedOutgoingMessages<N> {
924    fn drop(&mut self) {
925        // Ensure gauge is decremented for any remaining items to avoid metric leak on teardown.
926        let remaining = self.messages.len();
927        if remaining > 0 {
928            self.count.decrement(remaining as f64);
929        }
930    }
931}
932
933#[cfg(test)]
934mod tests {
935    use super::*;
936    use crate::session::{handle::PendingSessionEvent, start_pending_incoming_session};
937    use alloy_eips::eip2124::ForkFilter;
938    use reth_chainspec::MAINNET;
939    use reth_ecies::stream::ECIESStream;
940    use reth_eth_wire::{
941        handshake::EthHandshake, EthNetworkPrimitives, EthStream, GetBlockBodies,
942        HelloMessageWithProtocols, P2PStream, StatusBuilder, UnauthedEthStream, UnauthedP2PStream,
943        UnifiedStatus,
944    };
945    use reth_ethereum_forks::EthereumHardfork;
946    use reth_network_peers::pk2id;
947    use reth_network_types::session::config::PROTOCOL_BREACH_REQUEST_TIMEOUT;
948    use secp256k1::{SecretKey, SECP256K1};
949    use tokio::{
950        net::{TcpListener, TcpStream},
951        sync::mpsc,
952    };
953
954    /// Returns a testing `HelloMessage` for the given secret key
955    fn eth_hello(server_key: &SecretKey) -> HelloMessageWithProtocols {
956        HelloMessageWithProtocols::builder(pk2id(&server_key.public_key(SECP256K1))).build()
957    }
958
959    struct SessionBuilder<N: NetworkPrimitives = EthNetworkPrimitives> {
960        _remote_capabilities: Arc<Capabilities>,
961        active_session_tx: mpsc::Sender<ActiveSessionMessage<N>>,
962        active_session_rx: ReceiverStream<ActiveSessionMessage<N>>,
963        to_sessions: Vec<mpsc::Sender<SessionCommand<N>>>,
964        secret_key: SecretKey,
965        local_peer_id: PeerId,
966        hello: HelloMessageWithProtocols,
967        status: UnifiedStatus,
968        fork_filter: ForkFilter,
969        next_id: usize,
970    }
971
972    impl<N: NetworkPrimitives> SessionBuilder<N> {
973        fn next_id(&mut self) -> SessionId {
974            let id = self.next_id;
975            self.next_id += 1;
976            SessionId(id)
977        }
978
979        /// Connects a new Eth stream and executes the given closure with that established stream
980        fn with_client_stream<F, O>(
981            &self,
982            local_addr: SocketAddr,
983            f: F,
984        ) -> Pin<Box<dyn Future<Output = ()> + Send>>
985        where
986            F: FnOnce(EthStream<P2PStream<ECIESStream<TcpStream>>, N>) -> O + Send + 'static,
987            O: Future<Output = ()> + Send + Sync,
988        {
989            let mut status = self.status;
990            let fork_filter = self.fork_filter.clone();
991            let local_peer_id = self.local_peer_id;
992            let mut hello = self.hello.clone();
993            let key = SecretKey::new(&mut rand_08::thread_rng());
994            hello.id = pk2id(&key.public_key(SECP256K1));
995            Box::pin(async move {
996                let outgoing = TcpStream::connect(local_addr).await.unwrap();
997                let sink = ECIESStream::connect(outgoing, key, local_peer_id).await.unwrap();
998
999                let (p2p_stream, _) = UnauthedP2PStream::new(sink).handshake(hello).await.unwrap();
1000
1001                let eth_version = p2p_stream.shared_capabilities().eth_version().unwrap();
1002                status.set_eth_version(eth_version);
1003
1004                let (client_stream, _) = UnauthedEthStream::new(p2p_stream)
1005                    .handshake(status, fork_filter)
1006                    .await
1007                    .unwrap();
1008                f(client_stream).await
1009            })
1010        }
1011
1012        async fn connect_incoming(&mut self, stream: TcpStream) -> ActiveSession<N> {
1013            let remote_addr = stream.local_addr().unwrap();
1014            let session_id = self.next_id();
1015            let (_disconnect_tx, disconnect_rx) = oneshot::channel();
1016            let (pending_sessions_tx, pending_sessions_rx) = mpsc::channel(1);
1017
1018            tokio::task::spawn(start_pending_incoming_session(
1019                Arc::new(EthHandshake::default()),
1020                disconnect_rx,
1021                session_id,
1022                stream,
1023                pending_sessions_tx,
1024                remote_addr,
1025                self.secret_key,
1026                self.hello.clone(),
1027                self.status,
1028                self.fork_filter.clone(),
1029                Default::default(),
1030            ));
1031
1032            let mut stream = ReceiverStream::new(pending_sessions_rx);
1033
1034            match stream.next().await.unwrap() {
1035                PendingSessionEvent::Established {
1036                    session_id,
1037                    remote_addr,
1038                    peer_id,
1039                    capabilities,
1040                    conn,
1041                    ..
1042                } => {
1043                    let (_to_session_tx, messages_rx) = mpsc::channel(10);
1044                    let (commands_to_session, commands_rx) = mpsc::channel(10);
1045                    let poll_sender = PollSender::new(self.active_session_tx.clone());
1046
1047                    self.to_sessions.push(commands_to_session);
1048
1049                    ActiveSession {
1050                        next_id: 0,
1051                        remote_peer_id: peer_id,
1052                        remote_addr,
1053                        remote_capabilities: Arc::clone(&capabilities),
1054                        session_id,
1055                        commands_rx: ReceiverStream::new(commands_rx),
1056                        to_session_manager: MeteredPollSender::new(
1057                            poll_sender,
1058                            "network_active_session",
1059                        ),
1060                        pending_message_to_session: None,
1061                        internal_request_rx: ReceiverStream::new(messages_rx).fuse(),
1062                        inflight_requests: Default::default(),
1063                        conn,
1064                        queued_outgoing: QueuedOutgoingMessages::new(Gauge::noop()),
1065                        received_requests_from_remote: Default::default(),
1066                        internal_request_timeout_interval: tokio::time::interval(
1067                            INITIAL_REQUEST_TIMEOUT,
1068                        ),
1069                        internal_request_timeout: Arc::new(AtomicU64::new(
1070                            INITIAL_REQUEST_TIMEOUT.as_millis() as u64,
1071                        )),
1072                        protocol_breach_request_timeout: PROTOCOL_BREACH_REQUEST_TIMEOUT,
1073                        terminate_message: None,
1074                        range_info: None,
1075                        local_range_info: BlockRangeInfo::new(
1076                            0,
1077                            1000,
1078                            alloy_primitives::B256::ZERO,
1079                        ),
1080                        range_update_interval: None,
1081                        last_sent_latest_block: None,
1082                    }
1083                }
1084                ev => {
1085                    panic!("unexpected message {ev:?}")
1086                }
1087            }
1088        }
1089    }
1090
1091    impl Default for SessionBuilder {
1092        fn default() -> Self {
1093            let (active_session_tx, active_session_rx) = mpsc::channel(100);
1094
1095            let (secret_key, pk) = SECP256K1.generate_keypair(&mut rand_08::thread_rng());
1096            let local_peer_id = pk2id(&pk);
1097
1098            Self {
1099                next_id: 0,
1100                _remote_capabilities: Arc::new(Capabilities::from(vec![])),
1101                active_session_tx,
1102                active_session_rx: ReceiverStream::new(active_session_rx),
1103                to_sessions: vec![],
1104                hello: eth_hello(&secret_key),
1105                secret_key,
1106                local_peer_id,
1107                status: StatusBuilder::default().build(),
1108                fork_filter: MAINNET
1109                    .hardfork_fork_filter(EthereumHardfork::Frontier)
1110                    .expect("The Frontier fork filter should exist on mainnet"),
1111            }
1112        }
1113    }
1114
1115    #[tokio::test(flavor = "multi_thread")]
1116    async fn test_disconnect() {
1117        let mut builder = SessionBuilder::default();
1118
1119        let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
1120        let local_addr = listener.local_addr().unwrap();
1121
1122        let expected_disconnect = DisconnectReason::UselessPeer;
1123
1124        let fut = builder.with_client_stream(local_addr, move |mut client_stream| async move {
1125            let msg = client_stream.next().await.unwrap().unwrap_err();
1126            assert_eq!(msg.as_disconnected().unwrap(), expected_disconnect);
1127        });
1128
1129        tokio::task::spawn(async move {
1130            let (incoming, _) = listener.accept().await.unwrap();
1131            let mut session = builder.connect_incoming(incoming).await;
1132
1133            session.start_disconnect(expected_disconnect).unwrap();
1134            session.await
1135        });
1136
1137        fut.await;
1138    }
1139
1140    #[tokio::test(flavor = "multi_thread")]
1141    async fn handle_dropped_stream() {
1142        let mut builder = SessionBuilder::default();
1143
1144        let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
1145        let local_addr = listener.local_addr().unwrap();
1146
1147        let fut = builder.with_client_stream(local_addr, move |client_stream| async move {
1148            drop(client_stream);
1149            tokio::time::sleep(Duration::from_secs(1)).await
1150        });
1151
1152        let (tx, rx) = oneshot::channel();
1153
1154        tokio::task::spawn(async move {
1155            let (incoming, _) = listener.accept().await.unwrap();
1156            let session = builder.connect_incoming(incoming).await;
1157            session.await;
1158
1159            tx.send(()).unwrap();
1160        });
1161
1162        tokio::task::spawn(fut);
1163
1164        rx.await.unwrap();
1165    }
1166
1167    #[tokio::test(flavor = "multi_thread")]
1168    async fn test_send_many_messages() {
1169        reth_tracing::init_test_tracing();
1170        let mut builder = SessionBuilder::default();
1171
1172        let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
1173        let local_addr = listener.local_addr().unwrap();
1174
1175        let num_messages = 100;
1176
1177        let fut = builder.with_client_stream(local_addr, move |mut client_stream| async move {
1178            for _ in 0..num_messages {
1179                client_stream
1180                    .send(EthMessage::NewPooledTransactionHashes66(Vec::new().into()))
1181                    .await
1182                    .unwrap();
1183            }
1184        });
1185
1186        let (tx, rx) = oneshot::channel();
1187
1188        tokio::task::spawn(async move {
1189            let (incoming, _) = listener.accept().await.unwrap();
1190            let session = builder.connect_incoming(incoming).await;
1191            session.await;
1192
1193            tx.send(()).unwrap();
1194        });
1195
1196        tokio::task::spawn(fut);
1197
1198        rx.await.unwrap();
1199    }
1200
1201    #[tokio::test(flavor = "multi_thread")]
1202    async fn test_request_timeout() {
1203        reth_tracing::init_test_tracing();
1204
1205        let mut builder = SessionBuilder::default();
1206
1207        let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
1208        let local_addr = listener.local_addr().unwrap();
1209
1210        let request_timeout = Duration::from_millis(100);
1211        let drop_timeout = Duration::from_millis(1500);
1212
1213        let fut = builder.with_client_stream(local_addr, move |client_stream| async move {
1214            let _client_stream = client_stream;
1215            tokio::time::sleep(drop_timeout * 60).await;
1216        });
1217        tokio::task::spawn(fut);
1218
1219        let (incoming, _) = listener.accept().await.unwrap();
1220        let mut session = builder.connect_incoming(incoming).await;
1221        session
1222            .internal_request_timeout
1223            .store(request_timeout.as_millis() as u64, Ordering::Relaxed);
1224        session.protocol_breach_request_timeout = drop_timeout;
1225        session.internal_request_timeout_interval =
1226            tokio::time::interval_at(tokio::time::Instant::now(), request_timeout);
1227        let (tx, rx) = oneshot::channel();
1228        let req = PeerRequest::GetBlockBodies { request: GetBlockBodies(vec![]), response: tx };
1229        session.on_internal_peer_request(req, Instant::now());
1230        tokio::spawn(session);
1231
1232        let err = rx.await.unwrap().unwrap_err();
1233        assert_eq!(err, RequestError::Timeout);
1234
1235        // wait for protocol breach error
1236        let msg = builder.active_session_rx.next().await.unwrap();
1237        match msg {
1238            ActiveSessionMessage::ProtocolBreach { .. } => {}
1239            ev => unreachable!("{ev:?}"),
1240        }
1241    }
1242
1243    #[tokio::test(flavor = "multi_thread")]
1244    async fn test_keep_alive() {
1245        let mut builder = SessionBuilder::default();
1246
1247        let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
1248        let local_addr = listener.local_addr().unwrap();
1249
1250        let fut = builder.with_client_stream(local_addr, move |mut client_stream| async move {
1251            let _ = tokio::time::timeout(Duration::from_secs(5), client_stream.next()).await;
1252            client_stream.into_inner().disconnect(DisconnectReason::UselessPeer).await.unwrap();
1253        });
1254
1255        let (tx, rx) = oneshot::channel();
1256
1257        tokio::task::spawn(async move {
1258            let (incoming, _) = listener.accept().await.unwrap();
1259            let session = builder.connect_incoming(incoming).await;
1260            session.await;
1261
1262            tx.send(()).unwrap();
1263        });
1264
1265        tokio::task::spawn(fut);
1266
1267        rx.await.unwrap();
1268    }
1269
1270    // This tests that incoming messages are delivered when there's capacity.
1271    #[tokio::test(flavor = "multi_thread")]
1272    async fn test_send_at_capacity() {
1273        let mut builder = SessionBuilder::default();
1274
1275        let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
1276        let local_addr = listener.local_addr().unwrap();
1277
1278        let fut = builder.with_client_stream(local_addr, move |mut client_stream| async move {
1279            client_stream
1280                .send(EthMessage::NewPooledTransactionHashes68(Default::default()))
1281                .await
1282                .unwrap();
1283            let _ = tokio::time::timeout(Duration::from_secs(100), client_stream.next()).await;
1284        });
1285        tokio::task::spawn(fut);
1286
1287        let (incoming, _) = listener.accept().await.unwrap();
1288        let session = builder.connect_incoming(incoming).await;
1289
1290        // fill the entire message buffer with an unrelated message
1291        let mut num_fill_messages = 0;
1292        loop {
1293            if builder
1294                .active_session_tx
1295                .try_send(ActiveSessionMessage::ProtocolBreach { peer_id: PeerId::random() })
1296                .is_err()
1297            {
1298                break
1299            }
1300            num_fill_messages += 1;
1301        }
1302
1303        tokio::task::spawn(async move {
1304            session.await;
1305        });
1306
1307        tokio::time::sleep(Duration::from_millis(100)).await;
1308
1309        for _ in 0..num_fill_messages {
1310            let message = builder.active_session_rx.next().await.unwrap();
1311            match message {
1312                ActiveSessionMessage::ProtocolBreach { .. } => {}
1313                ev => unreachable!("{ev:?}"),
1314            }
1315        }
1316
1317        let message = builder.active_session_rx.next().await.unwrap();
1318        match message {
1319            ActiveSessionMessage::ValidMessage {
1320                message: PeerMessage::PooledTransactions(_),
1321                ..
1322            } => {}
1323            _ => unreachable!(),
1324        }
1325    }
1326
1327    #[test]
1328    fn timeout_calculation_sanity_tests() {
1329        let rtt = Duration::from_secs(5);
1330        // timeout for an RTT of `rtt`
1331        let timeout = rtt * TIMEOUT_SCALING;
1332
1333        // if rtt hasn't changed, timeout shouldn't change
1334        assert_eq!(calculate_new_timeout(timeout, rtt), timeout);
1335
1336        // if rtt changed, the new timeout should change less than it
1337        assert!(calculate_new_timeout(timeout, rtt / 2) < timeout);
1338        assert!(calculate_new_timeout(timeout, rtt / 2) > timeout / 2);
1339        assert!(calculate_new_timeout(timeout, rtt * 2) > timeout);
1340        assert!(calculate_new_timeout(timeout, rtt * 2) < timeout * 2);
1341    }
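
    // A sanity check on the clamping behaviour: no matter how extreme the measured RTT is, the
    // updated timeout stays within `[MINIMUM_TIMEOUT, MAXIMUM_TIMEOUT]`.
    #[test]
    fn timeout_calculation_clamps_to_bounds() {
        // a tiny RTT cannot pull the timeout below the minimum
        assert_eq!(calculate_new_timeout(MINIMUM_TIMEOUT, Duration::from_millis(1)), MINIMUM_TIMEOUT);
        // a huge RTT cannot push the timeout above the maximum
        assert_eq!(calculate_new_timeout(MAXIMUM_TIMEOUT, Duration::from_secs(3600)), MAXIMUM_TIMEOUT);
    }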
1342}