//! Bodies downloader (`reth_downloaders/bodies/bodies.rs`): fetches block bodies in batches.
1use super::queue::BodiesRequestQueue;
2use crate::{bodies::task::TaskDownloader, metrics::BodyDownloaderMetrics};
3use alloy_consensus::BlockHeader;
4use alloy_primitives::BlockNumber;
5use futures::Stream;
6use futures_util::StreamExt;
7use reth_config::BodiesConfig;
8use reth_consensus::Consensus;
9use reth_network_p2p::{
10    bodies::{
11        client::BodiesClient,
12        downloader::{BodyDownloader, BodyDownloaderResult},
13        response::BlockResponse,
14    },
15    error::{DownloadError, DownloadResult},
16};
17use reth_primitives_traits::{size::InMemorySize, Block, SealedHeader};
18use reth_storage_api::HeaderProvider;
19use reth_tasks::Runtime;
20use std::{
21    cmp::Ordering,
22    collections::BinaryHeap,
23    fmt::Debug,
24    ops::RangeInclusive,
25    pin::Pin,
26    sync::Arc,
27    task::{Context, Poll},
28};
29use tracing::info;
30
/// Downloads bodies in batches.
///
/// All blocks in a batch are fetched at the same time.
#[must_use = "Stream does nothing unless polled"]
#[derive(Debug)]
pub struct BodiesDownloader<
    B: Block,
    C: BodiesClient<Body = B::Body>,
    Provider: HeaderProvider<Header = B::Header>,
> {
    /// The bodies client
    client: Arc<C>,
    /// The consensus client used to validate downloaded bodies
    consensus: Arc<dyn Consensus<B>>,
    /// The database handle used to read headers for the requested range
    provider: Provider,
    /// The maximum number of non-empty blocks per one request
    request_limit: u64,
    /// The maximum number of block bodies returned at once from the stream
    stream_batch_size: usize,
    /// The allowed range for number of concurrent requests.
    concurrent_requests_range: RangeInclusive<usize>,
    /// Maximum number of bytes of received blocks to buffer internally.
    max_buffered_blocks_size_bytes: usize,
    /// Current estimated size of buffered blocks in bytes.
    buffered_blocks_size_bytes: usize,
    /// The range of block numbers for body download.
    download_range: RangeInclusive<BlockNumber>,
    /// The latest block number returned.
    latest_queued_block_number: Option<BlockNumber>,
    /// Requests in progress
    in_progress_queue: BodiesRequestQueue<B, C>,
    /// Buffered responses, ordered so the lowest block range is popped first
    buffered_responses: BinaryHeap<OrderedBodiesResponse<B>>,
    /// Queued body responses that can be returned for insertion into the database.
    queued_bodies: Vec<BlockResponse<B>>,
    /// The bodies downloader metrics.
    metrics: BodyDownloaderMetrics,
}
70
impl<B, C, Provider> BodiesDownloader<B, C, Provider>
where
    B: Block,
    C: BodiesClient<Body = B::Body> + 'static,
    Provider: HeaderProvider<Header = B::Header> + Unpin + 'static,
{
    /// Returns the next contiguous request.
    ///
    /// The request starts right after the last requested block number, or at the beginning of
    /// the download range if nothing has been requested yet.
    fn next_headers_request(&self) -> DownloadResult<Option<Vec<SealedHeader<Provider::Header>>>> {
        let start_at = match self.in_progress_queue.last_requested_block_number {
            Some(num) => num + 1,
            None => *self.download_range.start(),
        };
        // as the range is inclusive, we need to add 1 to the end.
        let items_left = (self.download_range.end() + 1).saturating_sub(start_at);
        let limit = items_left.min(self.request_limit);
        self.query_headers(start_at..=*self.download_range.end(), limit)
    }

    /// Retrieve a batch of headers from the database starting from the provided block number.
    ///
    /// This method is going to return the batch as soon as one of the conditions below
    /// is fulfilled:
    ///     1. The number of non-empty headers in the batch equals requested.
    ///     2. The total number of headers in the batch (both empty and non-empty) is greater than
    ///        or equal to the stream batch size.
    ///     3. Downloader reached the end of the range
    ///
    /// NOTE: The batches returned have a variable length.
    fn query_headers(
        &self,
        range: RangeInclusive<BlockNumber>,
        max_non_empty: u64,
    ) -> DownloadResult<Option<Vec<SealedHeader<B::Header>>>> {
        // Nothing to query: empty range or zero non-empty headers requested.
        if range.is_empty() || max_non_empty == 0 {
            return Ok(None)
        }

        // Collect headers while
        //      1. Current block number is in range
        //      2. The number of non empty headers is less than maximum
        //      3. The total number of headers is less than the stream batch size (this is only
        //         relevant if the range consists entirely of empty headers)
        let mut collected = 0;
        let mut non_empty_headers = 0;
        let headers = self.provider.sealed_headers_while(range.clone(), |header| {
            let should_take = range.contains(&header.number()) &&
                non_empty_headers < max_non_empty &&
                collected < self.stream_batch_size;

            if should_take {
                collected += 1;
                if !header.is_empty() {
                    non_empty_headers += 1;
                }
                true
            } else {
                false
            }
        })?;

        // An empty result is treated the same as "nothing left to request".
        Ok(Some(headers).filter(|h| !h.is_empty()))
    }

    /// Get the next expected block number for queueing.
    const fn next_expected_block_number(&self) -> BlockNumber {
        match self.latest_queued_block_number {
            Some(num) => num + 1,
            None => *self.download_range.start(),
        }
    }

    /// Max requests to handle at the same time
    ///
    /// This depends on the number of active peers but will always be
    /// `min_concurrent_requests..max_concurrent_requests`
    #[inline]
    fn concurrent_request_limit(&self) -> usize {
        let num_peers = self.client.num_connected_peers();

        // Never drop below the configured minimum, even with few peers connected.
        let max_requests = num_peers.max(*self.concurrent_requests_range.start());

        // if we're only connected to a few peers, we keep it low
        if num_peers < *self.concurrent_requests_range.start() {
            return max_requests
        }

        // Otherwise cap at the configured maximum.
        max_requests.min(*self.concurrent_requests_range.end())
    }

    /// Returns true if the size of buffered blocks is lower than the configured maximum
    const fn has_buffer_capacity(&self) -> bool {
        self.buffered_blocks_size_bytes < self.max_buffered_blocks_size_bytes
    }

    // Check if the stream is terminated: nothing left to request and no pending data anywhere.
    fn is_terminated(&self) -> bool {
        // There is nothing to request if the range is empty
        let nothing_to_request = self.download_range.is_empty() ||
            // or all blocks have already been requested.
            self.in_progress_queue
                .last_requested_block_number.is_some_and(|last| last == *self.download_range.end());

        nothing_to_request &&
            self.in_progress_queue.is_empty() &&
            self.buffered_responses.is_empty() &&
            self.queued_bodies.is_empty()
    }

    /// Clear all download related data.
    ///
    /// Should be invoked upon encountering fatal error.
    fn clear(&mut self) {
        // `1..=0` is an empty range, signalling "no active download".
        self.download_range = RangeInclusive::new(1, 0);
        self.latest_queued_block_number.take();
        self.in_progress_queue.clear();
        self.queued_bodies = Vec::new();
        self.buffered_responses = BinaryHeap::new();
        self.buffered_blocks_size_bytes = 0;

        // reset metrics
        self.metrics.in_flight_requests.set(0.);
        self.metrics.buffered_responses.set(0.);
        self.metrics.buffered_blocks.set(0.);
        self.metrics.buffered_blocks_size_bytes.set(0.);
        self.metrics.queued_blocks.set(0.);
    }

    /// Queues bodies and sets the latest queued block number
    ///
    /// # Panics
    /// If `bodies` is empty.
    fn queue_bodies(&mut self, bodies: Vec<BlockResponse<B>>) {
        self.latest_queued_block_number = Some(bodies.last().expect("is not empty").block_number());
        self.queued_bodies.extend(bodies);
        self.metrics.queued_blocks.set(self.queued_bodies.len() as f64);
    }

    /// Removes the next response from the buffer, keeping the byte-size accounting
    /// and metrics in sync.
    fn pop_buffered_response(&mut self) -> Option<OrderedBodiesResponse<B>> {
        let resp = self.buffered_responses.pop()?;
        self.metrics.buffered_responses.decrement(1.);
        self.buffered_blocks_size_bytes -= resp.size();
        self.metrics.buffered_blocks.decrement(resp.len() as f64);
        self.metrics.buffered_blocks_size_bytes.set(self.buffered_blocks_size_bytes as f64);
        Some(resp)
    }

    /// Adds a new response to the internal buffer, updating the byte-size accounting and metrics.
    fn buffer_bodies_response(&mut self, response: Vec<BlockResponse<B>>) {
        let size = response.iter().map(BlockResponse::size).sum::<usize>();

        let response = OrderedBodiesResponse { resp: response, size };
        let response_len = response.len();

        self.buffered_blocks_size_bytes += size;
        self.buffered_responses.push(response);

        self.metrics.buffered_blocks.increment(response_len as f64);
        self.metrics.buffered_blocks_size_bytes.set(self.buffered_blocks_size_bytes as f64);
        self.metrics.buffered_responses.set(self.buffered_responses.len() as f64);
    }

    /// Returns a response if its first block number matches the next expected.
    fn try_next_buffered(&mut self) -> Option<Vec<BlockResponse<B>>> {
        if let Some(next) = self.buffered_responses.peek() {
            let expected = self.next_expected_block_number();
            let next_block_range = next.block_range();

            if next_block_range.contains(&expected) {
                return self.pop_buffered_response().map(|buffered| {
                    buffered
                        .resp
                        .into_iter()
                        // Trim blocks that were already returned or fall outside the range.
                        .skip_while(|b| b.block_number() < expected)
                        .take_while(|b| self.download_range.contains(&b.block_number()))
                        .collect()
                })
            }

            // Drop buffered response since we passed that range
            if *next_block_range.end() < expected {
                self.pop_buffered_response();
            }
        }
        None
    }

    /// Returns the next batch of block bodies that can be returned if we have enough buffered
    /// bodies
    fn try_split_next_batch(&mut self) -> Option<Vec<BlockResponse<B>>> {
        if self.queued_bodies.len() >= self.stream_batch_size {
            let next_batch = self.queued_bodies.drain(..self.stream_batch_size).collect::<Vec<_>>();
            // Release the excess capacity left behind by the drain.
            self.queued_bodies.shrink_to_fit();
            self.metrics.total_flushed.increment(next_batch.len() as u64);
            self.metrics.queued_blocks.set(self.queued_bodies.len() as f64);
            return Some(next_batch)
        }
        None
    }

    /// Check if a new request can be submitted, it implements back pressure to prevent overwhelming
    /// the system and causing memory overload.
    ///
    /// Returns true if a new request can be submitted
    fn can_submit_new_request(&self) -> bool {
        // requests are issued in order but not necessarily finished in order, so the queued bodies
        // can grow large if a certain request is slow, so we limit the followup requests if the
        // queued bodies grew too large
        self.queued_bodies.len() < 4 * self.stream_batch_size &&
            self.has_buffer_capacity() &&
            self.in_progress_queue.len() < self.concurrent_request_limit()
    }
}
281
impl<B, C, Provider> BodiesDownloader<B, C, Provider>
where
    B: Block + 'static,
    C: BodiesClient<Body = B::Body> + 'static,
    Provider: HeaderProvider<Header = B::Header> + Unpin + 'static,
{
    /// Convert the downloader into a [`TaskDownloader`] by spawning it via the given [`Runtime`].
    ///
    /// Consumes `self`; the downloader is driven by the spawned task from then on.
    pub fn into_task_with(self, runtime: &Runtime) -> TaskDownloader<B> {
        TaskDownloader::spawn_with(self, runtime)
    }
}
293
294impl<B, C, Provider> BodyDownloader for BodiesDownloader<B, C, Provider>
295where
296    B: Block + 'static,
297    C: BodiesClient<Body = B::Body> + 'static,
298    Provider: HeaderProvider<Header = B::Header> + Unpin + 'static,
299{
300    type Block = B;
301
302    /// Set a new download range (inclusive).
303    ///
304    /// If the provided range is a suffix of the current range with the same end block, the
305    /// existing download already covers it and the call is a no-op.
306    /// If the range starts immediately after the current range, it is treated as the next
307    /// consecutive range and appended without resetting the in-flight state.
308    /// For all other ranges, the downloader state is cleared and the new range replaces the old
309    /// one.
310    fn set_download_range(&mut self, range: RangeInclusive<BlockNumber>) -> DownloadResult<()> {
311        // Check if the range is valid.
312        if range.is_empty() {
313            tracing::error!(target: "downloaders::bodies", ?range, "Bodies download range is invalid (empty)");
314            return Err(DownloadError::InvalidBodyRange { range })
315        }
316
317        // Check if the provided range is the subset of the existing range.
318        let is_current_range_subset = self.download_range.contains(range.start()) &&
319            *range.end() == *self.download_range.end();
320        if is_current_range_subset {
321            tracing::trace!(target: "downloaders::bodies", ?range, "Download range already in progress");
322            // The current range already includes requested.
323            return Ok(())
324        }
325
326        // Check if the provided range is the next expected range.
327        let count = *range.end() - *range.start() + 1; // range is inclusive
328        let is_next_consecutive_range = *range.start() == *self.download_range.end() + 1;
329        if is_next_consecutive_range {
330            // New range received.
331            tracing::trace!(target: "downloaders::bodies", ?range, "New download range set");
332            info!(target: "downloaders::bodies", count, ?range, "Downloading bodies");
333            self.download_range = range;
334            return Ok(())
335        }
336
337        // The block range is reset. This can happen either after unwind or after the bodies were
338        // written by external services (e.g. BlockchainTree).
339        tracing::trace!(target: "downloaders::bodies", ?range, prev_range = ?self.download_range, "Download range reset");
340        info!(target: "downloaders::bodies", count, ?range, "Downloading bodies");
341        // Increment out-of-order requests metric if the new start is below the last returned block
342        if let Some(last_returned) = self.latest_queued_block_number &&
343            *range.start() < last_returned
344        {
345            self.metrics.out_of_order_requests.increment(1);
346        }
347        self.clear();
348        self.download_range = range;
349        Ok(())
350    }
351}
352
impl<B, C, Provider> Stream for BodiesDownloader<B, C, Provider>
where
    B: Block + 'static,
    C: BodiesClient<Body = B::Body> + 'static,
    Provider: HeaderProvider<Header = B::Header> + Unpin + 'static,
{
    type Item = BodyDownloaderResult<B>;

    /// Drives the download state machine: submits requests, drains finished responses into
    /// the ordered queue and yields batches of bodies by block number.
    fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        let this = self.get_mut();
        if this.is_terminated() {
            return Poll::Ready(None)
        }
        // Submit new requests and poll any in progress
        loop {
            // Yield next batch if ready
            if let Some(next_batch) = this.try_split_next_batch() {
                return Poll::Ready(Some(Ok(next_batch)))
            }

            // Poll requests
            while let Poll::Ready(Some(response)) = this.in_progress_queue.poll_next_unpin(cx) {
                this.metrics.in_flight_requests.decrement(1.);
                match response {
                    Ok(response) => {
                        this.buffer_bodies_response(response);
                    }
                    Err(error) => {
                        tracing::debug!(target: "downloaders::bodies", %error, "Request failed");
                        // A failed request is fatal for the current range: reset all state.
                        this.clear();
                        return Poll::Ready(Some(Err(error)))
                    }
                };
            }

            // Loop exit condition
            let mut new_request_submitted = false;
            // Submit new requests
            'inner: while this.can_submit_new_request() {
                match this.next_headers_request() {
                    Ok(Some(request)) => {
                        this.metrics.in_flight_requests.increment(1.);
                        this.in_progress_queue.push_new_request(
                            Arc::clone(&this.client),
                            Arc::clone(&this.consensus),
                            request,
                        );
                        new_request_submitted = true;
                    }
                    // No more headers left to request in the current range.
                    Ok(None) => break 'inner,
                    Err(error) => {
                        tracing::error!(target: "downloaders::bodies", %error, "Failed to download from next request");
                        this.clear();
                        return Poll::Ready(Some(Err(error)))
                    }
                };
            }

            // Move contiguous buffered responses into the ordered output queue.
            while let Some(buf_response) = this.try_next_buffered() {
                this.queue_bodies(buf_response);
            }

            // shrink the buffer so that it doesn't grow indefinitely
            this.buffered_responses.shrink_to_fit();

            if !new_request_submitted {
                break
            }
        }

        // All requests are handled, stream is finished
        if this.in_progress_queue.is_empty() {
            if this.queued_bodies.is_empty() {
                return Poll::Ready(None)
            }
            // Flush whatever remains, capped at one stream batch.
            let batch_size = this.stream_batch_size.min(this.queued_bodies.len());
            let next_batch = this.queued_bodies.drain(..batch_size).collect::<Vec<_>>();
            this.queued_bodies.shrink_to_fit();
            this.metrics.total_flushed.increment(next_batch.len() as u64);
            this.metrics.queued_blocks.set(this.queued_bodies.len() as f64);
            return Poll::Ready(Some(Ok(next_batch)))
        }

        Poll::Pending
    }
}
439
/// A batch of consecutive block responses, ordered for buffering in a [`BinaryHeap`].
///
/// The `Ord` implementation orders by lowest first block number so the heap pops the
/// earliest range first.
#[derive(Debug)]
struct OrderedBodiesResponse<B: Block> {
    /// The block responses; accessors assume this is non-empty.
    resp: Vec<BlockResponse<B>>,
    /// The total size of the response in bytes
    size: usize,
}
446
447impl<B: Block> OrderedBodiesResponse<B> {
448    #[inline]
449    const fn len(&self) -> usize {
450        self.resp.len()
451    }
452
453    /// Returns the size of the response in bytes
454    ///
455    /// See [`BlockResponse::size`]
456    #[inline]
457    const fn size(&self) -> usize {
458        self.size
459    }
460}
461
462impl<B: Block> OrderedBodiesResponse<B> {
463    /// Returns the block number of the first element
464    ///
465    /// # Panics
466    /// If the response vec is empty.
467    fn first_block_number(&self) -> u64 {
468        self.resp.first().expect("is not empty").block_number()
469    }
470
471    /// Returns the range of the block numbers in the response
472    ///
473    /// # Panics
474    /// If the response vec is empty.
475    fn block_range(&self) -> RangeInclusive<u64> {
476        self.first_block_number()..=self.resp.last().expect("is not empty").block_number()
477    }
478}
479
impl<B: Block> PartialEq for OrderedBodiesResponse<B> {
    /// Responses compare equal when their first block numbers match; the contained
    /// bodies are deliberately not inspected — ordering in the heap only depends on
    /// where the batch starts.
    fn eq(&self, other: &Self) -> bool {
        self.first_block_number() == other.first_block_number()
    }
}

/// Consistent with [`PartialEq`]: equality is fully determined by the first block number.
impl<B: Block> Eq for OrderedBodiesResponse<B> {}
487
488impl<B: Block> PartialOrd for OrderedBodiesResponse<B> {
489    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
490        Some(self.cmp(other))
491    }
492}
493
494impl<B: Block> Ord for OrderedBodiesResponse<B> {
495    fn cmp(&self, other: &Self) -> Ordering {
496        self.first_block_number().cmp(&other.first_block_number()).reverse()
497    }
498}
499
/// Builder for [`BodiesDownloader`].
#[derive(Debug, Clone)]
pub struct BodiesDownloaderBuilder {
    /// The batch size of non-empty blocks per one request
    pub request_limit: u64,
    /// The maximum number of block bodies returned at once from the stream
    pub stream_batch_size: usize,
    /// Maximum number of bytes of received bodies to buffer internally.
    pub max_buffered_blocks_size_bytes: usize,
    /// The allowed range (min..=max) for the number of concurrent requests.
    pub concurrent_requests_range: RangeInclusive<usize>,
}
512
513impl BodiesDownloaderBuilder {
514    /// Creates a new [`BodiesDownloaderBuilder`] with configurations based on the provided
515    /// [`BodiesConfig`].
516    pub fn new(config: BodiesConfig) -> Self {
517        Self::default()
518            .with_stream_batch_size(config.downloader_stream_batch_size)
519            .with_request_limit(config.downloader_request_limit)
520            .with_max_buffered_blocks_size_bytes(config.downloader_max_buffered_blocks_size_bytes)
521            .with_concurrent_requests_range(
522                config.downloader_min_concurrent_requests..=
523                    config.downloader_max_concurrent_requests,
524            )
525    }
526}
527
528impl Default for BodiesDownloaderBuilder {
529    fn default() -> Self {
530        Self {
531            request_limit: 200,
532            stream_batch_size: 1_000,
533            max_buffered_blocks_size_bytes: 2 * 1024 * 1024 * 1024, // ~2GB
534            concurrent_requests_range: 5..=100,
535        }
536    }
537}
538
impl BodiesDownloaderBuilder {
    /// Set request batch size on the downloader.
    pub const fn with_request_limit(mut self, request_limit: u64) -> Self {
        self.request_limit = request_limit;
        self
    }

    /// Set stream batch size on the downloader.
    pub const fn with_stream_batch_size(mut self, stream_batch_size: usize) -> Self {
        self.stream_batch_size = stream_batch_size;
        self
    }

    /// Set concurrent requests range on the downloader.
    pub const fn with_concurrent_requests_range(
        mut self,
        concurrent_requests_range: RangeInclusive<usize>,
    ) -> Self {
        self.concurrent_requests_range = concurrent_requests_range;
        self
    }

    /// Set max buffered block bytes on the downloader.
    pub const fn with_max_buffered_blocks_size_bytes(
        mut self,
        max_buffered_blocks_size_bytes: usize,
    ) -> Self {
        self.max_buffered_blocks_size_bytes = max_buffered_blocks_size_bytes;
        self
    }

    /// Consume self and return the concurrent downloader.
    ///
    /// The downloader starts with an empty (`1..=0`) range; callers install a real range
    /// via `set_download_range`.
    pub fn build<B, C, Provider>(
        self,
        client: C,
        consensus: Arc<dyn Consensus<B>>,
        provider: Provider,
    ) -> BodiesDownloader<B, C, Provider>
    where
        B: Block,
        C: BodiesClient<Body = B::Body> + 'static,
        Provider: HeaderProvider<Header = B::Header>,
    {
        let Self {
            request_limit,
            stream_batch_size,
            concurrent_requests_range,
            max_buffered_blocks_size_bytes,
        } = self;
        let metrics = BodyDownloaderMetrics::default();
        // The request queue shares the downloader's metrics handle.
        let in_progress_queue = BodiesRequestQueue::new(metrics.clone());
        BodiesDownloader {
            client: Arc::new(client),
            consensus,
            provider,
            request_limit,
            stream_batch_size,
            max_buffered_blocks_size_bytes,
            concurrent_requests_range,
            in_progress_queue,
            metrics,
            download_range: RangeInclusive::new(1, 0),
            latest_queued_block_number: None,
            buffered_responses: Default::default(),
            queued_bodies: Default::default(),
            buffered_blocks_size_bytes: 0,
        }
    }
}
608
#[cfg(test)]
mod tests {
    use super::*;
    use crate::{
        bodies::test_utils::{insert_headers, zip_blocks},
        test_utils::{generate_bodies, TestBodiesClient},
    };
    use alloy_primitives::{map::B256Map, B256};
    use assert_matches::assert_matches;
    use reth_consensus::test_utils::TestConsensus;
    use reth_provider::test_utils::create_test_provider_factory;
    use reth_testing_utils::generators::{self, random_block_range, BlockRangeParams};

    // Check that the blocks are emitted in order of block number, not in order of
    // first-downloaded
    #[tokio::test]
    async fn streams_bodies_in_order() {
        // Generate some random blocks
        let factory = create_test_provider_factory();
        let (headers, mut bodies) = generate_bodies(0..=19);

        insert_headers(&factory, &headers);

        // Delayed responses force out-of-order completion, exercising the reordering buffer.
        let client = Arc::new(
            TestBodiesClient::default().with_bodies(bodies.clone()).with_should_delay(true),
        );

        let mut downloader = BodiesDownloaderBuilder::default()
            .build::<reth_ethereum_primitives::Block, _, _>(
                client.clone(),
                Arc::new(TestConsensus::default()),
                factory,
            );
        downloader.set_download_range(0..=19).expect("failed to set download range");

        assert_matches!(
            downloader.next().await,
            Some(Ok(res)) => assert_eq!(res, zip_blocks(headers.iter(), &mut bodies))
        );
        assert_eq!(client.times_requested(), 1);
    }

    // Check that the number of times requested equals to the number of headers divided by request
    // limit.
    #[tokio::test]
    async fn requests_correct_number_of_times() {
        // Generate some random blocks
        let factory = create_test_provider_factory();
        let mut rng = generators::rng();
        // Every block carries at least one tx so none of the headers are "empty".
        let blocks = random_block_range(
            &mut rng,
            0..=199,
            BlockRangeParams { parent: Some(B256::ZERO), tx_count: 1..2, ..Default::default() },
        );

        let headers = blocks.iter().map(|block| block.clone_sealed_header()).collect::<Vec<_>>();
        let bodies = blocks
            .into_iter()
            .map(|block| (block.hash(), block.into_body()))
            .collect::<B256Map<_>>();

        insert_headers(&factory, &headers);

        let request_limit = 10;
        let client = Arc::new(TestBodiesClient::default().with_bodies(bodies.clone()));

        let mut downloader = BodiesDownloaderBuilder::default()
            .with_request_limit(request_limit)
            .build::<reth_ethereum_primitives::Block, _, _>(
            client.clone(),
            Arc::new(TestConsensus::default()),
            factory,
        );
        downloader.set_download_range(0..=199).expect("failed to set download range");

        // 200 non-empty blocks / request limit of 10 => 20 requests expected.
        let _ = downloader.collect::<Vec<_>>().await;
        assert_eq!(client.times_requested(), 20);
    }

    // Check that bodies are returned in correct order
    // after resetting the download range multiple times.
    #[tokio::test]
    async fn streams_bodies_in_order_after_range_reset() {
        // Generate some random blocks
        let factory = create_test_provider_factory();
        let (headers, mut bodies) = generate_bodies(0..=99);

        insert_headers(&factory, &headers);

        let stream_batch_size = 20;
        let request_limit = 10;
        let client = Arc::new(
            TestBodiesClient::default().with_bodies(bodies.clone()).with_should_delay(true),
        );
        let mut downloader = BodiesDownloaderBuilder::default()
            .with_stream_batch_size(stream_batch_size)
            .with_request_limit(request_limit)
            .build::<reth_ethereum_primitives::Block, _, _>(
                client.clone(),
                Arc::new(TestConsensus::default()),
                factory,
            );

        // Re-set the range before every batch; bodies must still come back in order.
        let mut range_start = 0;
        while range_start < 100 {
            downloader.set_download_range(range_start..=99).expect("failed to set download range");

            assert_matches!(
                downloader.next().await,
                Some(Ok(res)) => assert_eq!(res, zip_blocks(headers.iter().skip(range_start as usize).take(stream_batch_size), &mut bodies))
            );
            assert!(downloader.latest_queued_block_number >= Some(range_start));
            range_start += stream_batch_size as u64;
        }
    }

    // Check that the downloader picks up the new range and downloads bodies after previous range
    // was completed.
    #[tokio::test]
    async fn can_download_new_range_after_termination() {
        // Generate some random blocks
        let factory = create_test_provider_factory();
        let (headers, mut bodies) = generate_bodies(0..=199);

        insert_headers(&factory, &headers);

        let client = Arc::new(TestBodiesClient::default().with_bodies(bodies.clone()));

        let mut downloader = BodiesDownloaderBuilder::default()
            .with_stream_batch_size(100)
            .build::<reth_ethereum_primitives::Block, _, _>(
            client.clone(),
            Arc::new(TestConsensus::default()),
            factory,
        );

        // Set and download the first range
        downloader.set_download_range(0..=99).expect("failed to set download range");
        assert_matches!(
            downloader.next().await,
            Some(Ok(res)) => assert_eq!(res, zip_blocks(headers.iter().take(100), &mut bodies))
        );

        // Check that the stream is terminated
        assert!(downloader.next().await.is_none());

        // Set and download the second range
        downloader.set_download_range(100..=199).expect("failed to set download range");
        assert_matches!(
            downloader.next().await,
            Some(Ok(res)) => assert_eq!(res, zip_blocks(headers.iter().skip(100), &mut bodies))
        );
    }

    // Check that the downloader continues after the size limit is reached.
    #[tokio::test]
    async fn can_download_after_exceeding_limit() {
        // Generate some random blocks
        let factory = create_test_provider_factory();
        let (headers, mut bodies) = generate_bodies(0..=199);

        insert_headers(&factory, &headers);

        let client = Arc::new(TestBodiesClient::default().with_bodies(bodies.clone()));

        // Set the max buffered block size to 1 byte, to make sure that every response exceeds the
        // limit
        let mut downloader = BodiesDownloaderBuilder::default()
            .with_stream_batch_size(10)
            .with_request_limit(1)
            .with_max_buffered_blocks_size_bytes(1)
            .build::<reth_ethereum_primitives::Block, _, _>(
                client.clone(),
                Arc::new(TestConsensus::default()),
                factory,
            );

        // Set and download the entire range
        downloader.set_download_range(0..=199).expect("failed to set download range");
        let mut header = 0;
        while let Some(Ok(resp)) = downloader.next().await {
            assert_eq!(resp, zip_blocks(headers.iter().skip(header).take(resp.len()), &mut bodies));
            header += resp.len();
        }
    }

    // Check that the downloader can tolerate a few completely empty responses
    #[tokio::test]
    async fn can_tolerate_empty_responses() {
        // Generate some random blocks
        let factory = create_test_provider_factory();
        let (headers, mut bodies) = generate_bodies(0..=99);

        insert_headers(&factory, &headers);

        // respond with empty bodies for every other request.
        let client = Arc::new(
            TestBodiesClient::default().with_bodies(bodies.clone()).with_empty_responses(2),
        );

        let mut downloader = BodiesDownloaderBuilder::default()
            .with_request_limit(3)
            .with_stream_batch_size(100)
            .build::<reth_ethereum_primitives::Block, _, _>(
                client.clone(),
                Arc::new(TestConsensus::default()),
                factory,
            );

        // Download the requested range
        downloader.set_download_range(0..=99).expect("failed to set download range");
        assert_matches!(
            downloader.next().await,
            Some(Ok(res)) => assert_eq!(res, zip_blocks(headers.iter().take(100), &mut bodies))
        );
    }
}