reth_network/transactions/
fetcher.rs

1//! `TransactionFetcher` is responsible for rate limiting and retry logic for fetching
2//! transactions. Upon receiving an announcement, functionality of the `TransactionFetcher` is
3//! used for filtering out hashes 1) for which the tx is already known and 2) unknown but the hash
4//! is already seen in a previous announcement. The hashes that remain from an announcement are
5//! then packed into a request with respect to the [`EthVersion`] of the announcement. Any hashes
6//! that don't fit into the request, are buffered in the `TransactionFetcher`. If on the other
7//! hand, space remains, hashes that the peer has previously announced are taken out of buffered
8//! hashes to fill the request up. The [`GetPooledTransactions`] request is then sent to the
9//! peer's session, this marks the peer as active with respect to
10//! `MAX_CONCURRENT_TX_REQUESTS_PER_PEER`.
11//!
12//! When a peer buffers hashes in the `TransactionsManager::on_new_pooled_transaction_hashes`
13//! pipeline, it is stored as fallback peer for those hashes. When [`TransactionsManager`] is
14//! polled, it checks if any of fallback peer is idle. If so, it packs a request for that peer,
15//! filling it from the buffered hashes. It does so until there are no more idle peers or until
16//! the hashes buffer is empty.
17//!
18//! If a [`GetPooledTransactions`] request resolves with an error, the hashes in the request are
19//! buffered with respect to `MAX_REQUEST_RETRIES_PER_TX_HASH`. So is the case if the request
20//! resolves with partial success, that is some of the requested hashes are not in the response,
21//! these are then buffered.
22//!
23//! Most healthy peers will send the same hashes in their announcements, as RLPx is a gossip
24//! protocol. This means it's unlikely, that a valid hash, will be buffered for very long
25//! before it's re-tried. Nonetheless, the capacity of the buffered hashes cache must be large
26//! enough to buffer many hashes during network failure, to allow for recovery.
27
28use super::{
29    config::TransactionFetcherConfig,
30    constants::{tx_fetcher::*, SOFT_LIMIT_COUNT_HASHES_IN_GET_POOLED_TRANSACTIONS_REQUEST},
31    PeerMetadata, PooledTransactions, SOFT_LIMIT_BYTE_SIZE_POOLED_TRANSACTIONS_RESPONSE,
32};
33use crate::{
34    cache::{LruCache, LruMap},
35    duration_metered_exec,
36    metrics::TransactionFetcherMetrics,
37};
38use alloy_consensus::transaction::PooledTransaction;
39use alloy_primitives::TxHash;
40use derive_more::{Constructor, Deref};
41use futures::{stream::FuturesUnordered, Future, FutureExt, Stream, StreamExt};
42use pin_project::pin_project;
43use reth_eth_wire::{
44    DedupPayload, GetPooledTransactions, HandleMempoolData, HandleVersionedMempoolData,
45    PartiallyValidData, RequestTxHashes, ValidAnnouncementData,
46};
47use reth_eth_wire_types::{EthNetworkPrimitives, NetworkPrimitives};
48use reth_network_api::PeerRequest;
49use reth_network_p2p::error::{RequestError, RequestResult};
50use reth_network_peers::PeerId;
51use reth_primitives_traits::SignedTransaction;
52use schnellru::ByLength;
53use std::{
54    collections::HashMap,
55    pin::Pin,
56    task::{ready, Context, Poll},
57    time::Duration,
58};
59use tokio::sync::{mpsc::error::TrySendError, oneshot, oneshot::error::RecvError};
60use tracing::trace;
61
62/// The type responsible for fetching missing transactions from peers.
63///
64/// This will keep track of unique transaction hashes that are currently being fetched and submits
65/// new requests on announced hashes.
66#[derive(Debug)]
67#[pin_project]
68pub struct TransactionFetcher<N: NetworkPrimitives = EthNetworkPrimitives> {
69    /// All peers with to which a [`GetPooledTransactions`] request is inflight.
70    pub active_peers: LruMap<PeerId, u8, ByLength>,
71    /// All currently active [`GetPooledTransactions`] requests.
72    ///
73    /// The set of hashes encompassed by these requests are a subset of all hashes in the fetcher.
74    /// It's disjoint from the set of hashes which are awaiting an idle fallback peer in order to
75    /// be fetched.
76    #[pin]
77    pub inflight_requests: FuturesUnordered<GetPooledTxRequestFut<N::PooledTransaction>>,
78    /// Hashes that are awaiting an idle fallback peer so they can be fetched.
79    ///
80    /// This is a subset of all hashes in the fetcher, and is disjoint from the set of hashes for
81    /// which a [`GetPooledTransactions`] request is inflight.
82    pub hashes_pending_fetch: LruCache<TxHash>,
83    /// Tracks all hashes in the transaction fetcher.
84    pub hashes_fetch_inflight_and_pending_fetch: LruMap<TxHash, TxFetchMetadata, ByLength>,
85    /// Info on capacity of the transaction fetcher.
86    pub info: TransactionFetcherInfo,
87    #[doc(hidden)]
88    metrics: TransactionFetcherMetrics,
89}
90
91impl<N: NetworkPrimitives> TransactionFetcher<N> {
92    /// Removes the peer from the active set.
93    pub(crate) fn remove_peer(&mut self, peer_id: &PeerId) {
94        self.active_peers.remove(peer_id);
95    }
96
97    /// Updates metrics.
98    #[inline]
99    pub fn update_metrics(&self) {
100        let metrics = &self.metrics;
101
102        metrics.inflight_transaction_requests.set(self.inflight_requests.len() as f64);
103
104        let hashes_pending_fetch = self.hashes_pending_fetch.len() as f64;
105        let total_hashes = self.hashes_fetch_inflight_and_pending_fetch.len() as f64;
106
107        metrics.hashes_pending_fetch.set(hashes_pending_fetch);
108        metrics.hashes_inflight_transaction_requests.set(total_hashes - hashes_pending_fetch);
109    }
110
111    #[inline]
112    fn update_pending_fetch_cache_search_metrics(&self, durations: TxFetcherSearchDurations) {
113        let metrics = &self.metrics;
114
115        let TxFetcherSearchDurations { find_idle_peer, fill_request } = durations;
116        metrics
117            .duration_find_idle_fallback_peer_for_any_pending_hash
118            .set(find_idle_peer.as_secs_f64());
119        metrics.duration_fill_request_from_hashes_pending_fetch.set(fill_request.as_secs_f64());
120    }
121
122    /// Sets up transaction fetcher with config
123    pub fn with_transaction_fetcher_config(config: &TransactionFetcherConfig) -> Self {
124        let TransactionFetcherConfig {
125            max_inflight_requests,
126            max_capacity_cache_txns_pending_fetch,
127            ..
128        } = *config;
129
130        let info = config.clone().into();
131
132        let metrics = TransactionFetcherMetrics::default();
133        metrics.capacity_inflight_requests.increment(max_inflight_requests as u64);
134
135        Self {
136            active_peers: LruMap::new(max_inflight_requests),
137            hashes_pending_fetch: LruCache::new(max_capacity_cache_txns_pending_fetch),
138            hashes_fetch_inflight_and_pending_fetch: LruMap::new(
139                max_inflight_requests + max_capacity_cache_txns_pending_fetch,
140            ),
141            info,
142            metrics,
143            ..Default::default()
144        }
145    }
146
147    /// Removes the specified hashes from inflight tracking.
148    #[inline]
149    pub fn remove_hashes_from_transaction_fetcher<'a, I>(&mut self, hashes: I)
150    where
151        I: IntoIterator<Item = &'a TxHash>,
152    {
153        for hash in hashes {
154            self.hashes_fetch_inflight_and_pending_fetch.remove(hash);
155            self.hashes_pending_fetch.remove(hash);
156        }
157    }
158
159    /// Updates peer's activity status upon a resolved [`GetPooledTxRequest`].
160    fn decrement_inflight_request_count_for(&mut self, peer_id: &PeerId) {
161        let remove = || -> bool {
162            if let Some(inflight_count) = self.active_peers.get(peer_id) {
163                *inflight_count = inflight_count.saturating_sub(1);
164                if *inflight_count == 0 {
165                    return true
166                }
167            }
168            false
169        }();
170
171        if remove {
172            self.active_peers.remove(peer_id);
173        }
174    }
175
176    /// Returns `true` if peer is idle with respect to `self.inflight_requests`.
177    #[inline]
178    pub fn is_idle(&self, peer_id: &PeerId) -> bool {
179        let Some(inflight_count) = self.active_peers.peek(peer_id) else { return true };
180        if *inflight_count < self.info.max_inflight_requests_per_peer {
181            return true
182        }
183        false
184    }
185
186    /// Returns any idle peer for the given hash.
187    pub fn get_idle_peer_for(&self, hash: TxHash) -> Option<&PeerId> {
188        let TxFetchMetadata { fallback_peers, .. } =
189            self.hashes_fetch_inflight_and_pending_fetch.peek(&hash)?;
190
191        fallback_peers.iter().find(|peer_id| self.is_idle(peer_id))
192    }
193
194    /// Returns any idle peer for any hash pending fetch. If one is found, the corresponding
195    /// hash is written to the request buffer that is passed as parameter.
196    ///
197    /// Loops through the hashes pending fetch in lru order until one is found with an idle
198    /// fallback peer, or the budget passed as parameter is depleted, whatever happens first.
199    pub fn find_any_idle_fallback_peer_for_any_pending_hash(
200        &mut self,
201        hashes_to_request: &mut RequestTxHashes,
202        mut budget: Option<usize>, // search fallback peers for max `budget` lru pending hashes
203    ) -> Option<PeerId> {
204        let mut hashes_pending_fetch_iter = self.hashes_pending_fetch.iter();
205
206        let idle_peer = loop {
207            let &hash = hashes_pending_fetch_iter.next()?;
208
209            let idle_peer = self.get_idle_peer_for(hash);
210
211            if idle_peer.is_some() {
212                hashes_to_request.insert(hash);
213                break idle_peer.copied()
214            }
215
216            if let Some(ref mut bud) = budget {
217                *bud = bud.saturating_sub(1);
218                if *bud == 0 {
219                    return None
220                }
221            }
222        };
223        let hash = hashes_to_request.iter().next()?;
224
225        // pop hash that is loaded in request buffer from cache of hashes pending fetch
226        drop(hashes_pending_fetch_iter);
227        _ = self.hashes_pending_fetch.remove(hash);
228
229        idle_peer
230    }
231
232    /// Packages hashes for a [`GetPooledTxRequest`] up to limit. Returns left over hashes. Takes
233    /// a [`RequestTxHashes`] buffer as parameter for filling with hashes to request.
234    ///
235    /// Returns left over hashes.
236    pub fn pack_request(
237        &self,
238        hashes_to_request: &mut RequestTxHashes,
239        hashes_from_announcement: ValidAnnouncementData,
240    ) -> RequestTxHashes {
241        if hashes_from_announcement.msg_version().is_eth68() {
242            return self.pack_request_eth68(hashes_to_request, hashes_from_announcement)
243        }
244        self.pack_request_eth66(hashes_to_request, hashes_from_announcement)
245    }
246
247    /// Packages hashes for a [`GetPooledTxRequest`] from an
248    /// [`Eth68`](reth_eth_wire::EthVersion::Eth68) announcement up to limit as defined by protocol
249    /// version 68. Takes a [`RequestTxHashes`] buffer as parameter for filling with hashes to
250    /// request.
251    ///
252    /// Returns left over hashes.
253    ///
254    /// Loops through hashes passed as parameter and checks if a hash fits in the expected
255    /// response. If no, it's added to surplus hashes. If yes, it's added to hashes to the request
256    /// and expected response size is accumulated.
257    pub fn pack_request_eth68(
258        &self,
259        hashes_to_request: &mut RequestTxHashes,
260        hashes_from_announcement: impl HandleMempoolData
261            + IntoIterator<Item = (TxHash, Option<(u8, usize)>)>,
262    ) -> RequestTxHashes {
263        let mut acc_size_response = 0;
264
265        let mut hashes_from_announcement_iter = hashes_from_announcement.into_iter();
266
267        if let Some((hash, Some((_ty, size)))) = hashes_from_announcement_iter.next() {
268            hashes_to_request.insert(hash);
269
270            // tx is really big, pack request with single tx
271            if size >= self.info.soft_limit_byte_size_pooled_transactions_response_on_pack_request {
272                return hashes_from_announcement_iter.collect()
273            }
274            acc_size_response = size;
275        }
276
277        let mut surplus_hashes = RequestTxHashes::default();
278
279        // folds size based on expected response size  and adds selected hashes to the request
280        // list and the other hashes to the surplus list
281        for (hash, metadata) in hashes_from_announcement_iter.by_ref() {
282            let Some((_ty, size)) = metadata else {
283                unreachable!("this method is called upon reception of an eth68 announcement")
284            };
285
286            let next_acc_size = acc_size_response + size;
287
288            if next_acc_size <=
289                self.info.soft_limit_byte_size_pooled_transactions_response_on_pack_request
290            {
291                // only update accumulated size of tx response if tx will fit in without exceeding
292                // soft limit
293                acc_size_response = next_acc_size;
294                _ = hashes_to_request.insert(hash)
295            } else {
296                _ = surplus_hashes.insert(hash)
297            }
298
299            let free_space =
300                self.info.soft_limit_byte_size_pooled_transactions_response_on_pack_request -
301                    acc_size_response;
302
303            if free_space < MEDIAN_BYTE_SIZE_SMALL_LEGACY_TX_ENCODED {
304                break
305            }
306        }
307
308        surplus_hashes.extend(hashes_from_announcement_iter.map(|(hash, _metadata)| hash));
309
310        surplus_hashes
311    }
312
313    /// Packages hashes for a [`GetPooledTxRequest`] from an
314    /// [`Eth66`](reth_eth_wire::EthVersion::Eth66) announcement up to limit as defined by
315    /// protocol version 66. Takes a [`RequestTxHashes`] buffer as parameter for filling with
316    /// hashes to request.
317    ///
318    /// Returns left over hashes.
319    pub fn pack_request_eth66(
320        &self,
321        hashes_to_request: &mut RequestTxHashes,
322        hashes_from_announcement: ValidAnnouncementData,
323    ) -> RequestTxHashes {
324        let (mut hashes, _version) = hashes_from_announcement.into_request_hashes();
325        if hashes.len() <= SOFT_LIMIT_COUNT_HASHES_IN_GET_POOLED_TRANSACTIONS_REQUEST {
326            *hashes_to_request = hashes;
327            hashes_to_request.shrink_to_fit();
328
329            RequestTxHashes::default()
330        } else {
331            let surplus_hashes =
332                hashes.retain_count(SOFT_LIMIT_COUNT_HASHES_IN_GET_POOLED_TRANSACTIONS_REQUEST);
333            *hashes_to_request = hashes;
334            hashes_to_request.shrink_to_fit();
335
336            surplus_hashes
337        }
338    }
339
340    /// Tries to buffer hashes for retry.
341    pub fn try_buffer_hashes_for_retry(
342        &mut self,
343        mut hashes: RequestTxHashes,
344        peer_failed_to_serve: &PeerId,
345    ) {
346        // It could be that the txns have been received over broadcast in the time being. Remove
347        // the peer as fallback peer so it isn't request again for these hashes.
348        hashes.retain(|hash| {
349            if let Some(entry) = self.hashes_fetch_inflight_and_pending_fetch.get(hash) {
350                entry.fallback_peers_mut().remove(peer_failed_to_serve);
351                return true
352            }
353            // tx has been seen over broadcast in the time it took for the request to resolve
354            false
355        });
356
357        self.buffer_hashes(hashes, None)
358    }
359
360    /// Number of hashes pending fetch.
361    pub fn num_pending_hashes(&self) -> usize {
362        self.hashes_pending_fetch.len()
363    }
364
365    /// Number of all transaction hashes in the fetcher.
366    pub fn num_all_hashes(&self) -> usize {
367        self.hashes_fetch_inflight_and_pending_fetch.len()
368    }
369
370    /// Buffers hashes. Note: Only peers that haven't yet tried to request the hashes should be
371    /// passed as `fallback_peer` parameter! For re-buffering hashes on failed request, use
372    /// [`TransactionFetcher::try_buffer_hashes_for_retry`]. Hashes that have been re-requested
373    /// [`DEFAULT_MAX_RETRIES`], are dropped.
374    pub fn buffer_hashes(&mut self, hashes: RequestTxHashes, fallback_peer: Option<PeerId>) {
375        for hash in hashes {
376            // hash could have been evicted from bounded lru map
377            if self.hashes_fetch_inflight_and_pending_fetch.peek(&hash).is_none() {
378                continue
379            }
380
381            let Some(TxFetchMetadata { retries, fallback_peers, .. }) =
382                self.hashes_fetch_inflight_and_pending_fetch.get(&hash)
383            else {
384                return
385            };
386
387            if let Some(peer_id) = fallback_peer {
388                // peer has not yet requested hash
389                fallback_peers.insert(peer_id);
390            } else {
391                if *retries >= DEFAULT_MAX_RETRIES {
392                    trace!(target: "net::tx",
393                        %hash,
394                        retries,
395                        "retry limit for `GetPooledTransactions` requests reached for hash, dropping hash"
396                    );
397
398                    self.hashes_fetch_inflight_and_pending_fetch.remove(&hash);
399                    self.hashes_pending_fetch.remove(&hash);
400                    continue
401                }
402                *retries += 1;
403            }
404
405            if let (_, Some(evicted_hash)) = self.hashes_pending_fetch.insert_and_get_evicted(hash)
406            {
407                self.hashes_fetch_inflight_and_pending_fetch.remove(&evicted_hash);
408            }
409        }
410    }
411
412    /// Tries to request hashes pending fetch.
413    ///
414    /// Finds the first buffered hash with a fallback peer that is idle, if any. Fills the rest of
415    /// the request by checking the transactions seen by the peer against the buffer.
416    pub fn on_fetch_pending_hashes(
417        &mut self,
418        peers: &HashMap<PeerId, PeerMetadata<N>>,
419        has_capacity_wrt_pending_pool_imports: impl Fn(usize) -> bool,
420    ) -> bool {
421        let mut hashes_to_request = RequestTxHashes::with_capacity(
422            DEFAULT_MARGINAL_COUNT_HASHES_GET_POOLED_TRANSACTIONS_REQUEST,
423        );
424        let mut search_durations = TxFetcherSearchDurations::default();
425
426        // budget to look for an idle peer before giving up
427        let budget_find_idle_fallback_peer = self
428            .search_breadth_budget_find_idle_fallback_peer(&has_capacity_wrt_pending_pool_imports);
429
430        let peer_id = duration_metered_exec!(
431            {
432                let Some(peer_id) = self.find_any_idle_fallback_peer_for_any_pending_hash(
433                    &mut hashes_to_request,
434                    budget_find_idle_fallback_peer,
435                ) else {
436                    // no peers are idle or budget is depleted
437                    return false
438                };
439
440                peer_id
441            },
442            search_durations.find_idle_peer
443        );
444
445        // peer should always exist since `is_session_active` already checked
446        let Some(peer) = peers.get(&peer_id) else { return false };
447        let conn_eth_version = peer.version;
448
449        // fill the request with more hashes pending fetch that have been announced by the peer.
450        // the search for more hashes is done with respect to the given budget, which determines
451        // how many hashes to loop through before giving up. if no more hashes are found wrt to
452        // the budget, the single hash that was taken out of the cache above is sent in a request.
453        let budget_fill_request = self
454            .search_breadth_budget_find_intersection_pending_hashes_and_hashes_seen_by_peer(
455                &has_capacity_wrt_pending_pool_imports,
456            );
457
458        duration_metered_exec!(
459            {
460                self.fill_request_from_hashes_pending_fetch(
461                    &mut hashes_to_request,
462                    &peer.seen_transactions,
463                    budget_fill_request,
464                )
465            },
466            search_durations.fill_request
467        );
468
469        self.update_pending_fetch_cache_search_metrics(search_durations);
470
471        trace!(target: "net::tx",
472            peer_id=format!("{peer_id:#}"),
473            hashes=?*hashes_to_request,
474            %conn_eth_version,
475            "requesting hashes that were stored pending fetch from peer"
476        );
477
478        // request the buffered missing transactions
479        if let Some(failed_to_request_hashes) =
480            self.request_transactions_from_peer(hashes_to_request, peer)
481        {
482            trace!(target: "net::tx",
483                peer_id=format!("{peer_id:#}"),
484                ?failed_to_request_hashes,
485                %conn_eth_version,
486                "failed sending request to peer's session, buffering hashes"
487            );
488
489            self.buffer_hashes(failed_to_request_hashes, Some(peer_id));
490            return false
491        }
492
493        true
494    }
495
496    /// Filters out hashes that have been seen before. For hashes that have already been seen, the
497    /// peer is added as fallback peer.
498    pub fn filter_unseen_and_pending_hashes(
499        &mut self,
500        new_announced_hashes: &mut ValidAnnouncementData,
501        is_tx_bad_import: impl Fn(&TxHash) -> bool,
502        peer_id: &PeerId,
503        client_version: &str,
504    ) {
505        let mut previously_unseen_hashes_count = 0;
506
507        let msg_version = new_announced_hashes.msg_version();
508
509        // filter out inflight hashes, and register the peer as fallback for all inflight hashes
510        new_announced_hashes.retain(|hash, metadata| {
511
512            // occupied entry
513            if let Some(TxFetchMetadata{ tx_encoded_length: previously_seen_size, ..}) = self.hashes_fetch_inflight_and_pending_fetch.peek_mut(hash) {
514                // update size metadata if available
515                if let Some((_ty, size)) = metadata {
516                    if let Some(prev_size) = previously_seen_size {
517                        // check if this peer is announcing a different size than a previous peer
518                        if size != prev_size {
519                            trace!(target: "net::tx",
520                                peer_id=format!("{peer_id:#}"),
521                                %hash,
522                                size,
523                                previously_seen_size,
524                                %client_version,
525                                "peer announced a different size for tx, this is especially worrying if one size is much bigger..."
526                            );
527                        }
528                    }
529                    // believe the most recent peer to announce tx
530                    *previously_seen_size = Some(*size);
531                }
532
533                // hash has been seen but is not inflight
534                if self.hashes_pending_fetch.remove(hash) {
535                    return true
536                }
537
538                return false
539            }
540
541            // vacant entry
542
543            if is_tx_bad_import(hash) {
544                return false
545            }
546
547            previously_unseen_hashes_count += 1;
548
549            if self.hashes_fetch_inflight_and_pending_fetch.get_or_insert(*hash, ||
550                TxFetchMetadata{retries: 0, fallback_peers: LruCache::new(DEFAULT_MAX_COUNT_FALLBACK_PEERS as u32), tx_encoded_length: None}
551            ).is_none() {
552
553                trace!(target: "net::tx",
554                    peer_id=format!("{peer_id:#}"),
555                    %hash,
556                    ?msg_version,
557                    %client_version,
558                    "failed to cache new announced hash from peer in schnellru::LruMap, dropping hash"
559                );
560
561                return false
562            }
563            true
564        });
565
566        trace!(target: "net::tx",
567            peer_id=format!("{peer_id:#}"),
568            previously_unseen_hashes_count=previously_unseen_hashes_count,
569            msg_version=?msg_version,
570            client_version=%client_version,
571            "received previously unseen hashes in announcement from peer"
572        );
573    }
574
575    /// Requests the missing transactions from the previously unseen announced hashes of the peer.
576    /// Returns the requested hashes if the request concurrency limit is reached or if the request
577    /// fails to send over the channel to the peer's session task.
578    ///
579    /// This filters all announced hashes that are already in flight, and requests the missing,
580    /// while marking the given peer as an alternative peer for the hashes that are already in
581    /// flight.
582    pub fn request_transactions_from_peer(
583        &mut self,
584        new_announced_hashes: RequestTxHashes,
585        peer: &PeerMetadata<N>,
586    ) -> Option<RequestTxHashes> {
587        let peer_id: PeerId = peer.request_tx.peer_id;
588        let conn_eth_version = peer.version;
589
590        if self.active_peers.len() >= self.info.max_inflight_requests {
591            trace!(target: "net::tx",
592                peer_id=format!("{peer_id:#}"),
593                hashes=?*new_announced_hashes,
594                %conn_eth_version,
595                max_inflight_transaction_requests=self.info.max_inflight_requests,
596                "limit for concurrent `GetPooledTransactions` requests reached, dropping request for hashes to peer"
597            );
598            return Some(new_announced_hashes)
599        }
600
601        let Some(inflight_count) = self.active_peers.get_or_insert(peer_id, || 0) else {
602            trace!(target: "net::tx",
603                peer_id=format!("{peer_id:#}"),
604                hashes=?*new_announced_hashes,
605                conn_eth_version=%conn_eth_version,
606                "failed to cache active peer in schnellru::LruMap, dropping request to peer"
607            );
608            return Some(new_announced_hashes)
609        };
610
611        if *inflight_count >= self.info.max_inflight_requests_per_peer {
612            trace!(target: "net::tx",
613                peer_id=format!("{peer_id:#}"),
614                hashes=?*new_announced_hashes,
615                %conn_eth_version,
616                max_concurrent_tx_reqs_per_peer=self.info.max_inflight_requests_per_peer,
617                "limit for concurrent `GetPooledTransactions` requests per peer reached"
618            );
619            return Some(new_announced_hashes)
620        }
621
622        #[cfg(debug_assertions)]
623        {
624            for hash in &new_announced_hashes {
625                if self.hashes_pending_fetch.contains(hash) {
626                    tracing::debug!(target: "net::tx", "`{}` should have been taken out of buffer before packing in a request, breaks invariant `@hashes_pending_fetch` and `@inflight_requests`, `@hashes_fetch_inflight_and_pending_fetch` for `{}`: {:?}",
627                        format!("{:?}", new_announced_hashes), // Assuming new_announced_hashes can be debug-printed directly
628                        format!("{:?}", new_announced_hashes),
629                        new_announced_hashes.iter().map(|hash| {
630                            let metadata = self.hashes_fetch_inflight_and_pending_fetch.get(hash);
631                            // Assuming you only need `retries` and `tx_encoded_length` for debugging
632                            (*hash, metadata.map(|m| (m.retries, m.tx_encoded_length)))
633                        }).collect::<Vec<(TxHash, Option<(u8, Option<usize>)>)>>())
634                }
635            }
636        }
637
638        let (response, rx) = oneshot::channel();
639        let req = PeerRequest::GetPooledTransactions {
640            request: GetPooledTransactions(new_announced_hashes.iter().copied().collect()),
641            response,
642        };
643
644        // try to send the request to the peer
645        if let Err(err) = peer.request_tx.try_send(req) {
646            // peer channel is full
647            return match err {
648                TrySendError::Full(_) | TrySendError::Closed(_) => {
649                    self.metrics.egress_peer_channel_full.increment(1);
650                    Some(new_announced_hashes)
651                }
652            }
653        }
654
655        *inflight_count += 1;
656        // stores a new request future for the request
657        self.inflight_requests.push(GetPooledTxRequestFut::new(peer_id, new_announced_hashes, rx));
658
659        None
660    }
661
662    /// Tries to fill request with hashes pending fetch so that the expected [`PooledTransactions`]
663    /// response is full enough. A mutable reference to a list of hashes to request is passed as
664    /// parameter. A budget is passed as parameter, this ensures that the node stops searching
665    /// for more hashes after the budget is depleted. Under bad network conditions, the cache of
666    /// hashes pending fetch may become very full for a while. As the node recovers, the hashes
667    /// pending fetch cache should get smaller. The budget should aim to be big enough to loop
668    /// through all buffered hashes in good network conditions.
669    ///
670    /// The request hashes buffer is filled as if it's an eth68 request, i.e. smartly assemble
671    /// the request based on expected response size. For any hash missing size metadata, it is
672    /// guessed at [`AVERAGE_BYTE_SIZE_TX_ENCODED`].
673    ///
674    /// Loops through hashes pending fetch and does:
675    ///
676    /// 1. Check if a hash pending fetch is seen by peer.
677    /// 2. Optimistically include the hash in the request.
678    /// 3. Accumulate expected total response size.
679    /// 4. Check if acc size and hashes count is at limit, if so stop looping.
680    /// 5. Remove hashes to request from cache of hashes pending fetch.
681    pub fn fill_request_from_hashes_pending_fetch(
682        &mut self,
683        hashes_to_request: &mut RequestTxHashes,
684        seen_hashes: &LruCache<TxHash>,
685        mut budget_fill_request: Option<usize>, // check max `budget` lru pending hashes
686    ) {
687        let Some(hash) = hashes_to_request.iter().next() else { return };
688
689        let mut acc_size_response = self
690            .hashes_fetch_inflight_and_pending_fetch
691            .get(hash)
692            .and_then(|entry| entry.tx_encoded_len())
693            .unwrap_or(AVERAGE_BYTE_SIZE_TX_ENCODED);
694
695        // if request full enough already, we're satisfied, send request for single tx
696        if acc_size_response >=
697            DEFAULT_SOFT_LIMIT_BYTE_SIZE_POOLED_TRANSACTIONS_RESPONSE_ON_FETCH_PENDING_HASHES
698        {
699            return
700        }
701
702        // try to fill request by checking if any other hashes pending fetch (in lru order) are
703        // also seen by peer
704        for hash in self.hashes_pending_fetch.iter() {
705            // 1. Check if a hash pending fetch is seen by peer.
706            if !seen_hashes.contains(hash) {
707                continue
708            };
709
710            // 2. Optimistically include the hash in the request.
711            hashes_to_request.insert(*hash);
712
713            // 3. Accumulate expected total response size.
714            let size = self
715                .hashes_fetch_inflight_and_pending_fetch
716                .get(hash)
717                .and_then(|entry| entry.tx_encoded_len())
718                .unwrap_or(AVERAGE_BYTE_SIZE_TX_ENCODED);
719
720            acc_size_response += size;
721
722            // 4. Check if acc size or hashes count is at limit, if so stop looping.
723            // if expected response is full enough or the number of hashes in the request is
724            // enough, we're satisfied
725            if acc_size_response >=
726                DEFAULT_SOFT_LIMIT_BYTE_SIZE_POOLED_TRANSACTIONS_RESPONSE_ON_FETCH_PENDING_HASHES ||
727                hashes_to_request.len() >
728                    DEFAULT_SOFT_LIMIT_COUNT_HASHES_IN_GET_POOLED_TRANSACTIONS_REQUEST_ON_FETCH_PENDING_HASHES
729            {
730                break
731            }
732
733            if let Some(ref mut bud) = budget_fill_request {
734                *bud -= 1;
735                if *bud == 0 {
736                    break
737                }
738            }
739        }
740
741        // 5. Remove hashes to request from cache of hashes pending fetch.
742        for hash in hashes_to_request.iter() {
743            self.hashes_pending_fetch.remove(hash);
744        }
745    }
746
747    /// Returns `true` if [`TransactionFetcher`] has capacity to request pending hashes. Returns
748    /// `false` if [`TransactionFetcher`] is operating close to full capacity.
749    pub fn has_capacity_for_fetching_pending_hashes(&self) -> bool {
750        let info = &self.info;
751
752        self.has_capacity(info.max_inflight_requests)
753    }
754
755    /// Returns `true` if the number of inflight requests are under a given tolerated max.
756    fn has_capacity(&self, max_inflight_requests: usize) -> bool {
757        self.inflight_requests.len() <= max_inflight_requests
758    }
759
760    /// Returns the limit to enforce when looking for any pending hash with an idle fallback peer.
761    ///
762    /// Returns `Some(limit)` if [`TransactionFetcher`] and the
763    /// [`TransactionPool`](reth_transaction_pool::TransactionPool) are operating close to full
764    /// capacity. Returns `None`, unlimited, if they are not that busy.
765    pub fn search_breadth_budget_find_idle_fallback_peer(
766        &self,
767        has_capacity_wrt_pending_pool_imports: impl Fn(usize) -> bool,
768    ) -> Option<usize> {
769        let info = &self.info;
770
771        let tx_fetcher_has_capacity = self.has_capacity(
772            info.max_inflight_requests /
773                DEFAULT_DIVISOR_MAX_COUNT_INFLIGHT_REQUESTS_ON_FIND_IDLE_PEER,
774        );
775        let tx_pool_has_capacity = has_capacity_wrt_pending_pool_imports(
776            DEFAULT_DIVISOR_MAX_COUNT_PENDING_POOL_IMPORTS_ON_FIND_IDLE_PEER,
777        );
778
779        if tx_fetcher_has_capacity && tx_pool_has_capacity {
780            // unlimited search breadth
781            None
782        } else {
783            // limited breadth of search for idle peer
784            let limit = DEFAULT_BUDGET_FIND_IDLE_FALLBACK_PEER;
785
786            trace!(target: "net::tx",
787                inflight_requests=self.inflight_requests.len(),
788                max_inflight_transaction_requests=info.max_inflight_requests,
789                hashes_pending_fetch=self.hashes_pending_fetch.len(),
790                limit,
791                "search breadth limited in search for idle fallback peer for some hash pending fetch"
792            );
793
794            Some(limit)
795        }
796    }
797
798    /// Returns the limit to enforce when looking for the intersection between hashes announced by
799    /// peer and hashes pending fetch.
800    ///
801    /// Returns `Some(limit)` if [`TransactionFetcher`] and the
802    /// [`TransactionPool`](reth_transaction_pool::TransactionPool) are operating close to full
803    /// capacity. Returns `None`, unlimited, if they are not that busy.
804    pub fn search_breadth_budget_find_intersection_pending_hashes_and_hashes_seen_by_peer(
805        &self,
806        has_capacity_wrt_pending_pool_imports: impl Fn(usize) -> bool,
807    ) -> Option<usize> {
808        let info = &self.info;
809
810        let tx_fetcher_has_capacity = self.has_capacity(
811            info.max_inflight_requests /
812                DEFAULT_DIVISOR_MAX_COUNT_INFLIGHT_REQUESTS_ON_FIND_INTERSECTION,
813        );
814        let tx_pool_has_capacity = has_capacity_wrt_pending_pool_imports(
815            DEFAULT_DIVISOR_MAX_COUNT_PENDING_POOL_IMPORTS_ON_FIND_INTERSECTION,
816        );
817
818        if tx_fetcher_has_capacity && tx_pool_has_capacity {
819            // unlimited search breadth
820            None
821        } else {
822            // limited breadth of search for idle peer
823            let limit = DEFAULT_BUDGET_FIND_INTERSECTION_ANNOUNCED_BY_PEER_AND_PENDING_FETCH;
824
825            trace!(target: "net::tx",
826                inflight_requests=self.inflight_requests.len(),
827                max_inflight_transaction_requests=self.info.max_inflight_requests,
828                hashes_pending_fetch=self.hashes_pending_fetch.len(),
829                limit=limit,
830                "search breadth limited in search for intersection of hashes announced by peer and hashes pending fetch"
831            );
832
833            Some(limit)
834        }
835    }
836
837    /// Processes a resolved [`GetPooledTransactions`] request. Queues the outcome as a
838    /// [`FetchEvent`], which will then be streamed by
839    /// [`TransactionsManager`](super::TransactionsManager).
840    pub fn on_resolved_get_pooled_transactions_request_fut(
841        &mut self,
842        response: GetPooledTxResponse<N::PooledTransaction>,
843    ) -> FetchEvent<N::PooledTransaction> {
844        // update peer activity, requests for buffered hashes can only be made to idle
845        // fallback peers
846        let GetPooledTxResponse { peer_id, mut requested_hashes, result } = response;
847
848        self.decrement_inflight_request_count_for(&peer_id);
849
850        match result {
851            Ok(Ok(transactions)) => {
852                //
853                // 1. peer has failed to serve any of the hashes it has announced to us that we,
854                // as a follow, have requested
855                //
856                if transactions.is_empty() {
857                    trace!(target: "net::tx",
858                        peer_id=format!("{peer_id:#}"),
859                        requested_hashes_len=requested_hashes.len(),
860                        "received empty `PooledTransactions` response from peer, peer failed to serve hashes it announced"
861                    );
862
863                    return FetchEvent::EmptyResponse { peer_id }
864                }
865
866                //
867                // 2. filter out hashes that we didn't request
868                //
869                let payload = UnverifiedPooledTransactions::new(transactions);
870
871                let unverified_len = payload.len();
872                let (verification_outcome, verified_payload) =
873                    payload.verify(&requested_hashes, &peer_id);
874
875                let unsolicited = unverified_len - verified_payload.len();
876                if unsolicited > 0 {
877                    self.metrics.unsolicited_transactions.increment(unsolicited as u64);
878                }
879
880                let report_peer = if verification_outcome == VerificationOutcome::ReportPeer {
881                    trace!(target: "net::tx",
882                        peer_id=format!("{peer_id:#}"),
883                        unverified_len,
884                        verified_payload_len=verified_payload.len(),
885                        "received `PooledTransactions` response from peer with entries that didn't verify against request, filtered out transactions"
886                    );
887                    true
888                } else {
889                    false
890                };
891
892                // peer has only sent hashes that we didn't request
893                if verified_payload.is_empty() {
894                    return FetchEvent::FetchError { peer_id, error: RequestError::BadResponse }
895                }
896
897                //
898                // 3. stateless validation of payload, e.g. dedup
899                //
900                let unvalidated_payload_len = verified_payload.len();
901
902                let valid_payload = verified_payload.dedup();
903
904                // todo: validate based on announced tx size/type and report peer for sending
905                // invalid response <https://github.com/paradigmxyz/reth/issues/6529>. requires
906                // passing the rlp encoded length down from active session along with the decoded
907                // tx.
908
909                if valid_payload.len() != unvalidated_payload_len {
910                    trace!(target: "net::tx",
911                    peer_id=format!("{peer_id:#}"),
912                    unvalidated_payload_len,
913                    valid_payload_len=valid_payload.len(),
914                    "received `PooledTransactions` response from peer with duplicate entries, filtered them out"
915                    );
916                }
917                // valid payload will have at least one transaction at this point. even if the tx
918                // size/type announced by the peer is different to the actual tx size/type, pass on
919                // to pending pool imports pipeline for validation.
920
921                //
922                // 4. clear received hashes
923                //
924                let requested_hashes_len = requested_hashes.len();
925                let mut fetched = Vec::with_capacity(valid_payload.len());
926                requested_hashes.retain(|requested_hash| {
927                    if valid_payload.contains_key(requested_hash) {
928                        // hash is now known, stop tracking
929                        fetched.push(*requested_hash);
930                        return false
931                    }
932                    true
933                });
934                fetched.shrink_to_fit();
935                self.metrics.fetched_transactions.increment(fetched.len() as u64);
936
937                if fetched.len() < requested_hashes_len {
938                    trace!(target: "net::tx",
939                        peer_id=format!("{peer_id:#}"),
940                        requested_hashes_len=requested_hashes_len,
941                        fetched_len=fetched.len(),
942                        "peer failed to serve hashes it announced"
943                    );
944                }
945
946                //
947                // 5. buffer left over hashes
948                //
949                self.try_buffer_hashes_for_retry(requested_hashes, &peer_id);
950
951                let transactions = valid_payload.into_data().into_values().collect();
952
953                FetchEvent::TransactionsFetched { peer_id, transactions, report_peer }
954            }
955            Ok(Err(req_err)) => {
956                self.try_buffer_hashes_for_retry(requested_hashes, &peer_id);
957                FetchEvent::FetchError { peer_id, error: req_err }
958            }
959            Err(_) => {
960                self.try_buffer_hashes_for_retry(requested_hashes, &peer_id);
961                // request channel closed/dropped
962                FetchEvent::FetchError { peer_id, error: RequestError::ChannelClosed }
963            }
964        }
965    }
966}
967
968impl<N: NetworkPrimitives> Stream for TransactionFetcher<N> {
969    type Item = FetchEvent<N::PooledTransaction>;
970
971    /// Advances all inflight requests and returns the next event.
972    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
973        // `FuturesUnordered` doesn't close when `None` is returned. so just return pending.
974        // <https://play.rust-lang.org/?version=stable&mode=debug&edition=2021&gist=815be2b6c8003303757c3ced135f363e>
975        if self.inflight_requests.is_empty() {
976            return Poll::Pending
977        }
978
979        if let Some(resp) = ready!(self.inflight_requests.poll_next_unpin(cx)) {
980            return Poll::Ready(Some(self.on_resolved_get_pooled_transactions_request_fut(resp)))
981        }
982
983        Poll::Pending
984    }
985}
986
987impl<T: NetworkPrimitives> Default for TransactionFetcher<T> {
988    fn default() -> Self {
989        Self {
990            active_peers: LruMap::new(DEFAULT_MAX_COUNT_CONCURRENT_REQUESTS),
991            inflight_requests: Default::default(),
992            hashes_pending_fetch: LruCache::new(DEFAULT_MAX_CAPACITY_CACHE_PENDING_FETCH),
993            hashes_fetch_inflight_and_pending_fetch: LruMap::new(
994                DEFAULT_MAX_CAPACITY_CACHE_INFLIGHT_AND_PENDING_FETCH,
995            ),
996            info: TransactionFetcherInfo::default(),
997            metrics: Default::default(),
998        }
999    }
1000}
1001
1002/// Metadata of a transaction hash that is yet to be fetched.
1003#[derive(Debug, Constructor)]
1004pub struct TxFetchMetadata {
1005    /// The number of times a request attempt has been made for the hash.
1006    retries: u8,
1007    /// Peers that have announced the hash, but to which a request attempt has not yet been made.
1008    fallback_peers: LruCache<PeerId>,
1009    /// Size metadata of the transaction if it has been seen in an eth68 announcement.
1010    // todo: store all seen sizes as a `(size, peer_id)` tuple to catch peers that respond with
1011    // another size tx than they announced. alt enter in request (won't catch peers announcing
1012    // wrong size for requests assembled from hashes pending fetch if stored in request fut)
1013    tx_encoded_length: Option<usize>,
1014}
1015
1016impl TxFetchMetadata {
1017    /// Returns a mutable reference to the fallback peers cache for this transaction hash.
1018    pub const fn fallback_peers_mut(&mut self) -> &mut LruCache<PeerId> {
1019        &mut self.fallback_peers
1020    }
1021
1022    /// Returns the size of the transaction, if its hash has been received in any
1023    /// [`Eth68`](reth_eth_wire::EthVersion::Eth68) announcement. If the transaction hash has only
1024    /// been seen in [`Eth66`](reth_eth_wire::EthVersion::Eth66) announcements so far, this will
1025    /// return `None`.
1026    pub const fn tx_encoded_len(&self) -> Option<usize> {
1027        self.tx_encoded_length
1028    }
1029}
1030
1031/// Represents possible events from fetching transactions.
1032#[derive(Debug)]
1033pub enum FetchEvent<T = PooledTransaction> {
1034    /// Triggered when transactions are successfully fetched.
1035    TransactionsFetched {
1036        /// The ID of the peer from which transactions were fetched.
1037        peer_id: PeerId,
1038        /// The transactions that were fetched, if available.
1039        transactions: PooledTransactions<T>,
1040        /// Whether the peer should be penalized for sending unsolicited transactions or for
1041        /// misbehavior.
1042        report_peer: bool,
1043    },
1044    /// Triggered when there is an error in fetching transactions.
1045    FetchError {
1046        /// The ID of the peer from which an attempt to fetch transactions resulted in an error.
1047        peer_id: PeerId,
1048        /// The specific error that occurred while fetching.
1049        error: RequestError,
1050    },
1051    /// An empty response was received.
1052    EmptyResponse {
1053        /// The ID of the sender.
1054        peer_id: PeerId,
1055    },
1056}
1057
1058/// An inflight request for [`PooledTransactions`] from a peer.
1059#[derive(Debug)]
1060pub struct GetPooledTxRequest<T = PooledTransaction> {
1061    peer_id: PeerId,
1062    /// Transaction hashes that were requested, for cleanup purposes
1063    requested_hashes: RequestTxHashes,
1064    response: oneshot::Receiver<RequestResult<PooledTransactions<T>>>,
1065}
1066
1067/// Upon reception of a response, a [`GetPooledTxRequest`] is deconstructed to form a
1068/// [`GetPooledTxResponse`].
1069#[derive(Debug)]
1070pub struct GetPooledTxResponse<T = PooledTransaction> {
1071    peer_id: PeerId,
1072    /// Transaction hashes that were requested, for cleanup purposes, since peer may only return a
1073    /// subset of requested hashes.
1074    requested_hashes: RequestTxHashes,
1075    result: Result<RequestResult<PooledTransactions<T>>, RecvError>,
1076}
1077
1078/// Stores the response receiver made by sending a [`GetPooledTransactions`] request to a peer's
1079/// session.
1080#[must_use = "futures do nothing unless polled"]
1081#[pin_project::pin_project]
1082#[derive(Debug)]
1083pub struct GetPooledTxRequestFut<T = PooledTransaction> {
1084    #[pin]
1085    inner: Option<GetPooledTxRequest<T>>,
1086}
1087
1088impl<T> GetPooledTxRequestFut<T> {
1089    #[inline]
1090    const fn new(
1091        peer_id: PeerId,
1092        requested_hashes: RequestTxHashes,
1093        response: oneshot::Receiver<RequestResult<PooledTransactions<T>>>,
1094    ) -> Self {
1095        Self { inner: Some(GetPooledTxRequest { peer_id, requested_hashes, response }) }
1096    }
1097}
1098
1099impl<T> Future for GetPooledTxRequestFut<T> {
1100    type Output = GetPooledTxResponse<T>;
1101
1102    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
1103        let mut req = self.as_mut().project().inner.take().expect("polled after completion");
1104        match req.response.poll_unpin(cx) {
1105            Poll::Ready(result) => Poll::Ready(GetPooledTxResponse {
1106                peer_id: req.peer_id,
1107                requested_hashes: req.requested_hashes,
1108                result,
1109            }),
1110            Poll::Pending => {
1111                self.project().inner.set(Some(req));
1112                Poll::Pending
1113            }
1114        }
1115    }
1116}
1117
1118/// Wrapper of unverified [`PooledTransactions`].
1119#[derive(Debug, Constructor, Deref)]
1120pub struct UnverifiedPooledTransactions<T> {
1121    txns: PooledTransactions<T>,
1122}
1123
1124/// [`PooledTransactions`] that have been successfully verified.
1125#[derive(Debug, Constructor, Deref)]
1126pub struct VerifiedPooledTransactions<T> {
1127    txns: PooledTransactions<T>,
1128}
1129
1130impl<T: SignedTransaction> DedupPayload for VerifiedPooledTransactions<T> {
1131    type Value = T;
1132
1133    fn is_empty(&self) -> bool {
1134        self.txns.is_empty()
1135    }
1136
1137    fn len(&self) -> usize {
1138        self.txns.len()
1139    }
1140
1141    fn dedup(self) -> PartiallyValidData<Self::Value> {
1142        PartiallyValidData::from_raw_data(
1143            self.txns.into_iter().map(|tx| (*tx.tx_hash(), tx)).collect(),
1144            None,
1145        )
1146    }
1147}
1148
1149trait VerifyPooledTransactionsResponse {
1150    type Transaction: SignedTransaction;
1151
1152    fn verify(
1153        self,
1154        requested_hashes: &RequestTxHashes,
1155        peer_id: &PeerId,
1156    ) -> (VerificationOutcome, VerifiedPooledTransactions<Self::Transaction>);
1157}
1158
1159impl<T: SignedTransaction> VerifyPooledTransactionsResponse for UnverifiedPooledTransactions<T> {
1160    type Transaction = T;
1161
1162    fn verify(
1163        self,
1164        requested_hashes: &RequestTxHashes,
1165        _peer_id: &PeerId,
1166    ) -> (VerificationOutcome, VerifiedPooledTransactions<T>) {
1167        let mut verification_outcome = VerificationOutcome::Ok;
1168
1169        let Self { mut txns } = self;
1170
1171        #[cfg(debug_assertions)]
1172        let mut tx_hashes_not_requested: smallvec::SmallVec<[TxHash; 16]> = smallvec::smallvec!();
1173        #[cfg(not(debug_assertions))]
1174        let mut tx_hashes_not_requested_count = 0;
1175
1176        txns.0.retain(|tx| {
1177            if !requested_hashes.contains(tx.tx_hash()) {
1178                verification_outcome = VerificationOutcome::ReportPeer;
1179
1180                #[cfg(debug_assertions)]
1181                tx_hashes_not_requested.push(*tx.tx_hash());
1182                #[cfg(not(debug_assertions))]
1183                {
1184                    tx_hashes_not_requested_count += 1;
1185                }
1186
1187                return false
1188            }
1189            true
1190        });
1191
1192        #[cfg(debug_assertions)]
1193        if !tx_hashes_not_requested.is_empty() {
1194            trace!(target: "net::tx",
1195                peer_id=format!("{_peer_id:#}"),
1196                ?tx_hashes_not_requested,
1197                "transactions in `PooledTransactions` response from peer were not requested"
1198            );
1199        }
1200        #[cfg(not(debug_assertions))]
1201        if tx_hashes_not_requested_count != 0 {
1202            trace!(target: "net::tx",
1203                peer_id=format!("{_peer_id:#}"),
1204                tx_hashes_not_requested_count,
1205                "transactions in `PooledTransactions` response from peer were not requested"
1206            );
1207        }
1208
1209        (verification_outcome, VerifiedPooledTransactions::new(txns))
1210    }
1211}
1212
1213/// Outcome from verifying a [`PooledTransactions`] response. Signals to caller whether to penalize
1214/// the sender of the response or not.
1215#[derive(Debug, Clone, Copy, PartialEq, Eq)]
1216pub enum VerificationOutcome {
1217    /// Peer behaves appropriately.
1218    Ok,
1219    /// A penalty should be flagged for the peer. Peer sent a response with unacceptably
1220    /// invalid entries.
1221    ReportPeer,
1222}
1223
1224/// Tracks stats about the [`TransactionFetcher`].
1225#[derive(Debug, Constructor)]
1226pub struct TransactionFetcherInfo {
1227    /// Max inflight [`GetPooledTransactions`] requests.
1228    pub max_inflight_requests: usize,
1229    /// Max inflight [`GetPooledTransactions`] requests per peer.
1230    pub max_inflight_requests_per_peer: u8,
1231    /// Soft limit for the byte size of the expected [`PooledTransactions`] response, upon packing
1232    /// a [`GetPooledTransactions`] request with hashes (by default less than 2 MiB worth of
1233    /// transactions is requested).
1234    pub soft_limit_byte_size_pooled_transactions_response_on_pack_request: usize,
1235    /// Soft limit for the byte size of a [`PooledTransactions`] response, upon assembling the
1236    /// response. Spec'd at 2 MiB, but can be adjusted for research purpose.
1237    pub soft_limit_byte_size_pooled_transactions_response: usize,
1238    /// Max capacity of the cache of transaction hashes, for transactions that weren't yet fetched.
1239    /// A transaction is pending fetch if its hash didn't fit into a [`GetPooledTransactions`] yet,
1240    /// or it wasn't returned upon request to peers.
1241    pub max_capacity_cache_txns_pending_fetch: u32,
1242}
1243
1244impl Default for TransactionFetcherInfo {
1245    fn default() -> Self {
1246        Self::new(
1247            DEFAULT_MAX_COUNT_CONCURRENT_REQUESTS as usize,
1248            DEFAULT_MAX_COUNT_CONCURRENT_REQUESTS_PER_PEER,
1249            DEFAULT_SOFT_LIMIT_BYTE_SIZE_POOLED_TRANSACTIONS_RESP_ON_PACK_GET_POOLED_TRANSACTIONS_REQ,
1250            SOFT_LIMIT_BYTE_SIZE_POOLED_TRANSACTIONS_RESPONSE,
1251            DEFAULT_MAX_CAPACITY_CACHE_PENDING_FETCH,
1252        )
1253    }
1254}
1255
1256impl From<TransactionFetcherConfig> for TransactionFetcherInfo {
1257    fn from(config: TransactionFetcherConfig) -> Self {
1258        let TransactionFetcherConfig {
1259            max_inflight_requests,
1260            max_inflight_requests_per_peer,
1261            soft_limit_byte_size_pooled_transactions_response,
1262            soft_limit_byte_size_pooled_transactions_response_on_pack_request,
1263            max_capacity_cache_txns_pending_fetch,
1264        } = config;
1265
1266        Self::new(
1267            max_inflight_requests as usize,
1268            max_inflight_requests_per_peer,
1269            soft_limit_byte_size_pooled_transactions_response_on_pack_request,
1270            soft_limit_byte_size_pooled_transactions_response,
1271            max_capacity_cache_txns_pending_fetch,
1272        )
1273    }
1274}
1275
/// Durations of the two search phases when filling a request from hashes pending fetch.
1276#[derive(Debug, Default)]
1277struct TxFetcherSearchDurations {
1278    find_idle_peer: Duration, // time spent finding an idle fallback peer
1279    fill_request: Duration,   // time spent filling the request from hashes pending fetch
1280}
1281
1282#[cfg(test)]
1283mod test {
1284    use super::*;
1285    use crate::test_utils::transactions::{buffer_hash_to_tx_fetcher, new_mock_session};
1286    use alloy_primitives::{hex, B256};
1287    use alloy_rlp::Decodable;
1288    use derive_more::IntoIterator;
1289    use reth_eth_wire_types::EthVersion;
1290    use reth_ethereum_primitives::TransactionSigned;
1291    use std::{collections::HashSet, str::FromStr};
1292
1293    #[derive(IntoIterator)]
1294    struct TestValidAnnouncementData(Vec<(TxHash, Option<(u8, usize)>)>);
1295
1296    impl HandleMempoolData for TestValidAnnouncementData {
1297        fn is_empty(&self) -> bool {
1298            self.0.is_empty()
1299        }
1300
1301        fn len(&self) -> usize {
1302            self.0.len()
1303        }
1304
1305        fn retain_by_hash(&mut self, mut f: impl FnMut(&TxHash) -> bool) {
1306            self.0.retain(|(hash, _)| f(hash))
1307        }
1308    }
1309
1310    impl HandleVersionedMempoolData for TestValidAnnouncementData {
1311        fn msg_version(&self) -> EthVersion {
1312            EthVersion::Eth68
1313        }
1314    }
1315
1316    #[test]
1317    fn pack_eth68_request() {
1318        reth_tracing::init_test_tracing();
1319
1320        // RIG TEST
1321
1322        let tx_fetcher = &mut TransactionFetcher::<EthNetworkPrimitives>::default();
1323
1324        let eth68_hashes = [
1325            B256::from_slice(&[1; 32]),
1326            B256::from_slice(&[2; 32]),
1327            B256::from_slice(&[3; 32]),
1328            B256::from_slice(&[4; 32]),
1329            B256::from_slice(&[5; 32]),
1330        ];
1331        let eth68_sizes = [
1332            DEFAULT_SOFT_LIMIT_BYTE_SIZE_POOLED_TRANSACTIONS_RESP_ON_PACK_GET_POOLED_TRANSACTIONS_REQ - MEDIAN_BYTE_SIZE_SMALL_LEGACY_TX_ENCODED - 1, // first will fit
1333            DEFAULT_SOFT_LIMIT_BYTE_SIZE_POOLED_TRANSACTIONS_RESP_ON_PACK_GET_POOLED_TRANSACTIONS_REQ, // second won't
1334            2, // free space > `MEDIAN_BYTE_SIZE_SMALL_LEGACY_TX_ENCODED`, third will fit, no more after this
1335            9,
1336            0,
1337        ];
1338
1339        let expected_request_hashes =
1340            [eth68_hashes[0], eth68_hashes[2]].into_iter().collect::<HashSet<_>>();
1341
1342        let expected_surplus_hashes =
1343            [eth68_hashes[1], eth68_hashes[3], eth68_hashes[4]].into_iter().collect::<HashSet<_>>();
1344
1345        let mut eth68_hashes_to_request = RequestTxHashes::with_capacity(3);
1346
1347        let valid_announcement_data = TestValidAnnouncementData(
1348            eth68_hashes
1349                .into_iter()
1350                .zip(eth68_sizes)
1351                .map(|(hash, size)| (hash, Some((0u8, size))))
1352                .collect::<Vec<_>>(),
1353        );
1354
1355        // TEST
1356
1357        let surplus_eth68_hashes =
1358            tx_fetcher.pack_request_eth68(&mut eth68_hashes_to_request, valid_announcement_data);
1359
1360        let eth68_hashes_to_request = eth68_hashes_to_request.into_iter().collect::<HashSet<_>>();
1361        let surplus_eth68_hashes = surplus_eth68_hashes.into_iter().collect::<HashSet<_>>();
1362
1363        assert_eq!(expected_request_hashes, eth68_hashes_to_request);
1364        assert_eq!(expected_surplus_hashes, surplus_eth68_hashes);
1365    }
1366
1367    #[tokio::test]
1368    async fn test_on_fetch_pending_hashes() {
1369        reth_tracing::init_test_tracing();
1370
1371        let tx_fetcher = &mut TransactionFetcher::default();
1372
1373        // RIG TEST
1374
1375        // hashes that will be fetched because they are stored as pending fetch
1376        let seen_hashes = [
1377            B256::from_slice(&[1; 32]),
1378            B256::from_slice(&[2; 32]),
1379            B256::from_slice(&[3; 32]),
1380            B256::from_slice(&[4; 32]),
1381        ];
1382        //
1383        // txns 1-3 are small, all will fit in the request. no metadata has been made available
1384        // for hash 4, it has only been seen over an eth66 conn, so the average tx size will be
1385        // assumed when filling the request.
1386        let seen_eth68_hashes_sizes = [120, 158, 116];
1387
1388        // peer that the seen hashes will be requested from, since they are pending fetch
1389        let peer_1 = PeerId::new([1; 64]);
1390        // second peer, won't do anything in this test
1391        let peer_2 = PeerId::new([2; 64]);
1392
1393        // add seen hashes to the peers' seen transactions
1394        //
1395        // get handle for peer_1's session to receive request for pending hashes
1396        let (mut peer_1_data, mut peer_1_mock_session_rx) =
1397            new_mock_session(peer_1, EthVersion::Eth66);
1398        for hash in &seen_hashes {
1399            peer_1_data.seen_transactions.insert(*hash);
1400        }
1401        let (mut peer_2_data, _) = new_mock_session(peer_2, EthVersion::Eth66);
1402        for hash in &seen_hashes {
1403            peer_2_data.seen_transactions.insert(*hash);
1404        }
1405        let mut peers = HashMap::default();
1406        peers.insert(peer_1, peer_1_data);
1407        peers.insert(peer_2, peer_2_data);
1408
1409        // insert seen_hashes into tx fetcher
1410        for i in 0..3 {
1411            // insert peer_2 as fallback peer for seen_hashes
1412            buffer_hash_to_tx_fetcher(
1413                tx_fetcher,
1414                seen_hashes[i],
1415                peer_2,
1416                0,
1417                Some(seen_eth68_hashes_sizes[i]),
1418            );
1419        }
1420        buffer_hash_to_tx_fetcher(tx_fetcher, seen_hashes[3], peer_2, 0, None);
1421
1422        // insert pending hash without peer_1 as fallback peer, only with peer_2 as fallback peer
1423        let hash_other = B256::from_slice(&[5; 32]);
1424        buffer_hash_to_tx_fetcher(tx_fetcher, hash_other, peer_2, 0, None);
1425
1426        // add peer_1 as lru fallback peer for seen hashes
1427        for hash in &seen_hashes {
1428            buffer_hash_to_tx_fetcher(tx_fetcher, *hash, peer_1, 0, None);
1429        }
1430
1431        // seen hashes and the random hash from peer_2 are pending fetch
1432        assert_eq!(tx_fetcher.num_pending_hashes(), 5);
1433
1434        // TEST
1435
1436        tx_fetcher.on_fetch_pending_hashes(&peers, |_| true);
1437
1438        // mock session of peer_1 receives request
1439        let req = peer_1_mock_session_rx
1440            .recv()
1441            .await
1442            .expect("peer session should receive request with buffered hashes");
1443        let PeerRequest::GetPooledTransactions { request, .. } = req else { unreachable!() };
1444        let GetPooledTransactions(requested_hashes) = request;
1445
1446        assert_eq!(
1447            requested_hashes.into_iter().collect::<HashSet<_>>(),
1448            seen_hashes.into_iter().collect::<HashSet<_>>()
1449        )
1450    }
1451
1452    #[test]
1453    fn verify_response_hashes() {
1454        let input = hex!(
1455            "02f871018302a90f808504890aef60826b6c94ddf4c5025d1a5742cf12f74eec246d4432c295e487e09c3bbcc12b2b80c080a0f21a4eacd0bf8fea9c5105c543be5a1d8c796516875710fafafdf16d16d8ee23a001280915021bb446d1973501a67f93d2b38894a514b976e7b46dc2fe54598daa"
1456        );
1457        let signed_tx_1: PooledTransaction =
1458            TransactionSigned::decode(&mut &input[..]).unwrap().try_into().unwrap();
1459        let input = hex!(
1460            "02f871018302a90f808504890aef60826b6c94ddf4c5025d1a5742cf12f74eec246d4432c295e487e09c3bbcc12b2b80c080a0f21a4eacd0bf8fea9c5105c543be5a1d8c796516875710fafafdf16d16d8ee23a001280915021bb446d1973501a67f93d2b38894a514b976e7b46dc2fe54598d76"
1461        );
1462        let signed_tx_2: PooledTransaction =
1463            TransactionSigned::decode(&mut &input[..]).unwrap().try_into().unwrap();
1464
1465        // only tx 1 is requested
1466        let request_hashes = [
1467            B256::from_str("0x3b9aca00f0671c9a2a1b817a0a78d3fe0c0f776cccb2a8c3c1b412a4f4e67890")
1468                .unwrap(),
1469            *signed_tx_1.hash(),
1470            B256::from_str("0x3b9aca00f0671c9a2a1b817a0a78d3fe0c0f776cccb2a8c3c1b412a4f4e12345")
1471                .unwrap(),
1472            B256::from_str("0x3b9aca00f0671c9a2a1b817a0a78d3fe0c0f776cccb2a8c3c1b412a4f4edabe3")
1473                .unwrap(),
1474        ];
1475
1476        for hash in &request_hashes {
1477            assert_ne!(hash, signed_tx_2.hash())
1478        }
1479
1480        let request_hashes = RequestTxHashes::new(request_hashes.into_iter().collect());
1481
1482        // but response contains tx 1 + another tx
1483        let response_txns = PooledTransactions(vec![signed_tx_1.clone(), signed_tx_2]);
1484        let payload = UnverifiedPooledTransactions::new(response_txns);
1485
1486        let (outcome, verified_payload) = payload.verify(&request_hashes, &PeerId::ZERO);
1487
1488        assert_eq!(VerificationOutcome::ReportPeer, outcome);
1489        assert_eq!(1, verified_payload.len());
1490        assert!(verified_payload.contains(&signed_tx_1));
1491    }
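
    // A minimal sketch, not part of the original test suite: checks that converting a
    // `TransactionFetcherConfig` into `TransactionFetcherInfo` carries the configured
    // limits over unchanged (field mapping as in the `From` impl above). Assumes
    // `TransactionFetcherConfig` provides a `Default` impl.
    #[test]
    fn fetcher_info_from_config_preserves_limits() {
        let config = TransactionFetcherConfig::default();
        // read the (Copy) limits before moving `config` into the conversion
        let max_inflight_requests = config.max_inflight_requests;
        let max_inflight_requests_per_peer = config.max_inflight_requests_per_peer;
        let max_capacity_cache_txns_pending_fetch = config.max_capacity_cache_txns_pending_fetch;

        let info = TransactionFetcherInfo::from(config);

        assert_eq!(info.max_inflight_requests, max_inflight_requests as usize);
        assert_eq!(info.max_inflight_requests_per_peer, max_inflight_requests_per_peer);
        assert_eq!(
            info.max_capacity_cache_txns_pending_fetch,
            max_capacity_cache_txns_pending_fetch
        );
    }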
1492}