1use alloy_consensus::BlockHeader;
2use alloy_eips::BlockHashOrNumber;
3use alloy_primitives::{BlockHash, BlockNumber, Sealable, B256};
4use async_compression::tokio::bufread::GzipDecoder;
5use futures::Future;
6use itertools::{Either, Itertools};
7use reth_consensus::{Consensus, ConsensusError};
8use reth_network_p2p::{
9 bodies::client::{BodiesClient, BodiesFut},
10 download::DownloadClient,
11 error::RequestError,
12 headers::client::{HeadersClient, HeadersDirection, HeadersFut, HeadersRequest},
13 priority::Priority,
14 BlockClient,
15};
16use reth_network_peers::PeerId;
17use reth_primitives_traits::{Block, BlockBody, FullBlock, SealedBlock, SealedHeader};
18use std::{collections::HashMap, io, ops::RangeInclusive, path::Path, sync::Arc};
19use thiserror::Error;
20use tokio::{
21 fs::File,
22 io::{AsyncReadExt, BufReader},
23};
24use tokio_stream::StreamExt;
25use tokio_util::codec::FramedRead;
26use tracing::{debug, trace, warn};
27
28use super::file_codec::BlockFileCodec;
29use crate::receipt_file_client::FromReceiptReader;
30
/// Default byte length of a chunk to read from a chain file: 1 GB.
pub const DEFAULT_BYTE_LEN_CHUNK_CHAIN_FILE: u64 = 1_000_000_000;
35
/// Client capable of serving block headers and bodies that were pre-loaded from a file.
///
/// Fully in-memory: all decoded headers and bodies are held in hash maps at once.
#[derive(Debug, Clone)]
pub struct FileClient<B: Block> {
    /// All headers in the file, keyed by block number.
    headers: HashMap<BlockNumber, B::Header>,

    /// Reverse index: block hash -> block number, used to resolve hash-based requests.
    hash_to_number: HashMap<BlockHash, BlockNumber>,

    /// All block bodies in the file, keyed by block hash.
    bodies: HashMap<BlockHash, B::Body>,
}
58
/// Errors that can occur when constructing or using a [`FileClient`].
#[derive(Debug, Error)]
pub enum FileClientError {
    /// A block failed consensus validation while being decoded from the file.
    #[error(transparent)]
    Consensus(#[from] ConsensusError),

    /// An error occurred while opening or reading the file.
    #[error(transparent)]
    Io(#[from] std::io::Error),

    /// RLP decoding failed; carries the decode error plus the undecoded bytes so the
    /// caller can retry once more data is available (partial block at a chunk boundary).
    #[error("{0}")]
    Rlp(alloy_rlp::Error, Vec<u8>),

    /// Custom error with a static message.
    #[error("{0}")]
    Custom(&'static str),
}
78
impl From<&'static str> for FileClientError {
    // Allows `?`-style conversion of static error messages into the `Custom` variant.
    fn from(value: &'static str) -> Self {
        Self::Custom(value)
    }
}
84
impl<B: FullBlock> FileClient<B> {
    /// Creates a new file client from an iterator of already-sealed blocks.
    pub fn from_blocks(blocks: impl IntoIterator<Item = SealedBlock<B>>) -> Self {
        let blocks: Vec<_> = blocks.into_iter().collect();
        let capacity = blocks.len();

        // pre-size all maps to the number of blocks to avoid rehashing
        let mut headers = HashMap::with_capacity(capacity);
        let mut hash_to_number = HashMap::with_capacity(capacity);
        let mut bodies = HashMap::with_capacity(capacity);

        for block in blocks {
            let number = block.number();
            let hash = block.hash();
            let (header, body) = block.split_sealed_header_body();

            headers.insert(number, header.into_header());
            hash_to_number.insert(hash, number);
            bodies.insert(hash, body);
        }

        Self { headers, hash_to_number, bodies }
    }

    /// Creates a new file client by opening and fully reading the file at `path`,
    /// validating each decoded block with `consensus`.
    pub async fn new<P: AsRef<Path>>(
        path: P,
        consensus: Arc<dyn Consensus<B>>,
    ) -> Result<Self, FileClientError> {
        let file = File::open(path).await?;
        Self::from_file(file, consensus).await
    }

    /// Initializes the [`FileClient`] from an open [`File`].
    ///
    /// Reads the entire file into memory and decodes it in a single pass; bytes of a
    /// trailing partially-encoded block (if any) are discarded.
    pub(crate) async fn from_file(
        mut file: File,
        consensus: Arc<dyn Consensus<B>>,
    ) -> Result<Self, FileClientError> {
        // get the file length so the decoder can size its buffer exactly
        let metadata = file.metadata().await?;
        let file_len = metadata.len();

        // slurp the whole file into memory
        let mut reader = vec![];
        file.read_to_end(&mut reader).await?;

        Ok(FileClientBuilder { consensus, parent_header: None }
            .build(&reader[..], file_len)
            .await?
            .file_client)
    }

    /// Returns the tip hash of the chain, i.e. the hash of the highest-numbered header.
    pub fn tip(&self) -> Option<B256> {
        self.headers.get(&self.max_block()?).map(|h| h.hash_slow())
    }

    /// Returns the start hash of the chain, i.e. the hash of the lowest-numbered header.
    pub fn start(&self) -> Option<B256> {
        self.headers.get(&self.min_block()?).map(|h| h.hash_slow())
    }

    /// Returns the highest block number this client has, or `None` if empty.
    pub fn max_block(&self) -> Option<u64> {
        self.headers.keys().max().copied()
    }

    /// Returns the lowest block number this client has, or `None` if empty.
    pub fn min_block(&self) -> Option<u64> {
        self.headers.keys().min().copied()
    }

    /// Clones, seals and returns the highest header this client has, or `None` if empty.
    pub fn tip_header(&self) -> Option<SealedHeader<B::Header>> {
        self.headers.get(&self.max_block()?).map(|h| SealedHeader::seal_slow(h.clone()))
    }

    /// Returns `true` if the stored block numbers form a contiguous range (no gaps).
    /// An empty client is trivially canonical.
    pub fn has_canonical_blocks(&self) -> bool {
        if self.headers.is_empty() {
            return true
        }
        let (min, max) = self.headers.keys().minmax().into_option().expect("not empty");
        // contiguous iff the key span equals the number of headers
        *max - *min + 1 == self.headers.len() as u64
    }

    /// Uses the provided bodies as the file client's block body buffer, replacing any
    /// existing bodies.
    pub fn with_bodies(mut self, bodies: HashMap<BlockHash, B::Body>) -> Self {
        self.bodies = bodies;
        self
    }

    /// Uses the provided headers as the file client's header buffer, indexing each by
    /// its hash.
    // NOTE(review): entries from previously-indexed headers are not removed from
    // `hash_to_number` — confirm callers only use this on freshly-constructed clients.
    pub fn with_headers(mut self, headers: HashMap<BlockNumber, B::Header>) -> Self {
        self.headers = headers;
        for (number, header) in &self.headers {
            self.hash_to_number.insert(header.hash_slow(), *number);
        }
        self
    }

    /// Returns the current number of headers in the client.
    pub fn headers_len(&self) -> usize {
        self.headers.len()
    }

    /// Returns the current number of bodies in the client.
    pub fn bodies_len(&self) -> usize {
        self.bodies.len()
    }

    /// Returns an iterator over all headers in the client (arbitrary order).
    pub fn headers_iter(&self) -> impl Iterator<Item = &B::Header> {
        self.headers.values()
    }

    /// Returns a mutable iterator over bodies paired with their block numbers.
    ///
    /// Panics if a body's hash is missing from the hash-to-number index (indexing with
    /// `numbers[hash]` panics on a missing key).
    pub fn bodies_iter_mut(&mut self) -> impl Iterator<Item = (u64, &mut B::Body)> {
        let bodies = &mut self.bodies;
        let numbers = &self.hash_to_number;
        bodies.iter_mut().map(|(hash, body)| (numbers[hash], body))
    }

    /// Returns the total number of transactions across all bodies in the client.
    pub fn total_transactions(&self) -> usize {
        self.bodies.iter().fold(0, |acc, (_, body)| acc + body.transactions().len())
    }
}
215
/// Helper that decodes a [`FileClient`] from a byte reader, validating each decoded
/// block with the configured consensus rules.
struct FileClientBuilder<B: Block> {
    /// Consensus implementation used to validate decoded headers and blocks.
    pub consensus: Arc<dyn Consensus<B>>,
    /// Optional parent of the first expected block; enables parent/child header checks.
    pub parent_header: Option<SealedHeader<B::Header>>,
}
220
impl<B: FullBlock<Header: reth_primitives_traits::BlockHeader>> FromReader
    for FileClientBuilder<B>
{
    type Error = FileClientError;
    type Output = FileClient<B>;

    /// Decodes RLP-encoded blocks from `reader` until it is exhausted or a partial block
    /// is hit, validating each block and collecting the results into a [`FileClient`].
    /// Bytes of a trailing partial block are returned in `remaining_bytes`.
    fn build<R>(
        &self,
        reader: R,
        num_bytes: u64,
    ) -> impl Future<Output = Result<DecodedFileChunk<Self::Output>, Self::Error>>
    where
        R: AsyncReadExt + Unpin,
    {
        let mut headers = HashMap::default();
        let mut hash_to_number = HashMap::default();
        let mut bodies = HashMap::default();

        // size the framed reader's buffer so the whole chunk fits without reallocation
        let mut stream =
            FramedRead::with_capacity(reader, BlockFileCodec::<B>::default(), num_bytes as usize);

        trace!(target: "downloaders::file",
            target_num_bytes=num_bytes,
            capacity=stream.read_buffer().capacity(),
            "init decode stream"
        );

        // leftover bytes of a partially-decoded block at the end of the chunk
        let mut remaining_bytes = vec![];

        let mut log_interval = 0;
        let mut log_interval_start_block = 0;

        let mut parent_header = self.parent_header.clone();

        async move {
            while let Some(block_res) = stream.next().await {
                let block = match block_res {
                    Ok(block) => block,
                    Err(FileClientError::Rlp(err, bytes)) => {
                        // partial block at the chunk tail: stop decoding and hand the
                        // undecoded bytes back so the next chunk can prepend them
                        trace!(target: "downloaders::file",
                            %err,
                            bytes_len=bytes.len(),
                            "partial block returned from decoding chunk"
                        );
                        remaining_bytes = bytes;
                        break
                    }
                    Err(err) => return Err(err),
                };

                let block = SealedBlock::seal_slow(block);

                // standalone header validation
                self.consensus.validate_header(block.sealed_header())?;
                // NOTE(review): parent linkage is only validated — and `parent_header`
                // only advanced — when a parent was supplied up front; confirm blocks
                // decoded without an initial parent are intentionally unchained.
                if let Some(parent) = &parent_header {
                    self.consensus.validate_header_against_parent(block.sealed_header(), parent)?;
                    parent_header = Some(block.sealed_header().clone());
                }

                // pre-execution block validation
                self.consensus.validate_block_pre_execution(&block)?;

                // index the decoded block
                let block_hash = block.hash();
                let block_number = block.number();
                let (header, body) = block.split_sealed_header_body();
                headers.insert(block_number, header.unseal());
                hash_to_number.insert(block_hash, block_number);
                bodies.insert(block_hash, body);

                // progress logging: first block, then every 100k blocks
                if log_interval == 0 {
                    trace!(target: "downloaders::file",
                        block_number,
                        "read first block"
                    );
                    log_interval_start_block = block_number;
                } else if log_interval % 100_000 == 0 {
                    trace!(target: "downloaders::file",
                        blocks=?log_interval_start_block..=block_number,
                        "read blocks from file"
                    );
                    log_interval_start_block = block_number + 1;
                }
                log_interval += 1;
            }

            trace!(target: "downloaders::file", blocks = headers.len(), "Initialized file client");

            Ok(DecodedFileChunk {
                file_client: FileClient { headers, hash_to_number, bodies },
                remaining_bytes,
                highest_block: None,
            })
        }
    }
}
319
impl<B: FullBlock> HeadersClient for FileClient<B> {
    type Header = B::Header;
    type Output = HeadersFut<B::Header>;

    /// Serves a headers request from the in-memory header map.
    ///
    /// Fails with [`RequestError::BadResponse`] if the start hash is unknown or any
    /// block number in the resolved range has no stored header.
    fn get_headers_with_priority(
        &self,
        request: HeadersRequest,
        _priority: Priority,
    ) -> Self::Output {
        // this just searches the in-memory buffer; no network involved
        let mut headers = Vec::new();
        trace!(target: "downloaders::file", request=?request, "Getting headers");

        // resolve the starting block number, translating a hash via the reverse index
        let start_num = match request.start {
            BlockHashOrNumber::Hash(hash) => match self.hash_to_number.get(&hash) {
                Some(num) => *num,
                None => {
                    warn!(%hash, "Could not find starting block number for requested header hash");
                    return Box::pin(async move { Err(RequestError::BadResponse) })
                }
            },
            BlockHashOrNumber::Number(num) => num,
        };

        // build either a rising or a falling iterator over the requested block numbers
        let range = if request.limit == 1 {
            Either::Left(start_num..start_num + 1)
        } else {
            match request.direction {
                HeadersDirection::Rising => Either::Left(start_num..start_num + request.limit),
                HeadersDirection::Falling => {
                    // NOTE(review): `start_num - request.limit + 1` underflows (panics in
                    // debug builds) when limit > start_num + 1 — confirm callers never
                    // request a falling range that extends past genesis.
                    Either::Right((start_num - request.limit + 1..=start_num).rev())
                }
            }
        };

        trace!(target: "downloaders::file", range=?range, "Getting headers with range");

        for block_number in range {
            match self.headers.get(&block_number).cloned() {
                Some(header) => headers.push(header),
                None => {
                    warn!(number=%block_number, "Could not find header");
                    return Box::pin(async move { Err(RequestError::BadResponse) })
                }
            }
        }

        Box::pin(async move { Ok((PeerId::default(), headers).into()) })
    }
}
370
371impl<B: FullBlock> BodiesClient for FileClient<B> {
372 type Body = B::Body;
373 type Output = BodiesFut<B::Body>;
374
375 fn get_block_bodies_with_priority_and_range_hint(
376 &self,
377 hashes: Vec<B256>,
378 _priority: Priority,
379 _range_hint: Option<RangeInclusive<u64>>,
380 ) -> Self::Output {
381 let mut bodies = Vec::new();
383
384 for hash in hashes {
387 match self.bodies.get(&hash).cloned() {
388 Some(body) => bodies.push(body),
389 None => return Box::pin(async move { Err(RequestError::BadResponse) }),
390 }
391 }
392
393 Box::pin(async move { Ok((PeerId::default(), bodies).into()) })
394 }
395}
396
impl<B: FullBlock> DownloadClient for FileClient<B> {
    /// A file client has no peers to penalize; only logs that the file may be bad.
    fn report_bad_message(&self, _peer_id: PeerId) {
        trace!("Reported a bad message on a file client, the file may be corrupted or invalid");
    }

    /// The file acts as a single static "peer".
    fn num_connected_peers(&self) -> usize {
        // no such thing as a connected peer when reading from a file
        1
    }
}
408
// Marker impl: a `FileClient` can serve both headers and bodies for block type `B`.
impl<B: FullBlock> BlockClient for FileClient<B> {
    type Block = B;
}
412
/// Wrapper over the source file, supporting plain and gzip-compressed reads.
#[derive(Debug)]
enum FileReader {
    /// Uncompressed file; tracks how many bytes of the file are still unread.
    Plain { file: File, remaining_bytes: u64 },
    /// Gzip-compressed file, decompressed on the fly.
    Gzip(GzipDecoder<BufReader<File>>),
}
421
impl FileReader {
    /// Reads up to `buf.len()` bytes, delegating to the plain file or the gzip decoder.
    async fn read(&mut self, buf: &mut [u8]) -> Result<usize, io::Error> {
        match self {
            Self::Plain { file, .. } => file.read(buf).await,
            Self::Gzip(decoder) => decoder.read(buf).await,
        }
    }

    /// Fills `chunk` up to roughly `chunk_byte_len` bytes, preserving any bytes already
    /// carried over in `chunk`.
    ///
    /// Returns the resulting chunk byte length, or `None` when the source is exhausted
    /// and `chunk` is empty.
    async fn read_next_chunk(
        &mut self,
        chunk: &mut Vec<u8>,
        chunk_byte_len: u64,
    ) -> Result<Option<u64>, FileClientError> {
        match self {
            Self::Plain { .. } => self.read_plain_chunk(chunk, chunk_byte_len).await,
            Self::Gzip(_) => {
                // the gzip path reports whether any data remains; map that onto the
                // same Option<chunk length> shape as the plain path
                Ok((self.read_gzip_chunk(chunk, chunk_byte_len).await?)
                    .then_some(chunk.len() as u64))
            }
        }
    }

    /// Chunk-read implementation for uncompressed files, using the tracked remaining
    /// file length to size the final (short) chunk exactly.
    async fn read_plain_chunk(
        &mut self,
        chunk: &mut Vec<u8>,
        chunk_byte_len: u64,
    ) -> Result<Option<u64>, FileClientError> {
        let Self::Plain { file, remaining_bytes } = self else {
            unreachable!("read_plain_chunk should only be called on Plain variant")
        };

        // nothing left in the file and no carried-over bytes: done
        if *remaining_bytes == 0 && chunk.is_empty() {
            return Ok(None)
        }

        // target chunk size: the configured length, capped by leftover bytes plus what
        // the file still holds
        let chunk_target_len = chunk_byte_len.min(*remaining_bytes + chunk.len() as u64);
        let old_bytes_len = chunk.len() as u64;

        // number of fresh bytes to pull from the file to reach the target
        let new_read_bytes_target_len = chunk_target_len - old_bytes_len;

        // grow the chunk with zeroed space and read directly into the new tail slice
        let prev_read_bytes_len = chunk.len();
        chunk.extend(std::iter::repeat_n(0, new_read_bytes_target_len as usize));
        let reader = &mut chunk[prev_read_bytes_len..];

        // `read_exact` fills the whole slice or errors, so this equals the slice length
        let new_read_bytes_len = file.read_exact(reader).await? as u64;
        let next_chunk_byte_len = chunk.len();

        // update the remaining-file budget
        *remaining_bytes -= new_read_bytes_len;

        debug!(target: "downloaders::file",
            max_chunk_byte_len=chunk_byte_len,
            prev_read_bytes_len,
            new_read_bytes_target_len,
            new_read_bytes_len,
            next_chunk_byte_len,
            remaining_file_byte_len=*remaining_bytes,
            "new bytes were read from file"
        );

        Ok(Some(next_chunk_byte_len as u64))
    }

    /// Chunk-read implementation for gzip streams: decompresses through a 64 KiB buffer
    /// until the chunk reaches `chunk_byte_len` bytes or the stream ends.
    ///
    /// Returns `true` if `chunk` holds any data afterwards. Note the chunk may overshoot
    /// `chunk_byte_len` by up to one buffer's worth of bytes.
    async fn read_gzip_chunk(
        &mut self,
        chunk: &mut Vec<u8>,
        chunk_byte_len: u64,
    ) -> Result<bool, FileClientError> {
        let mut buffer = vec![0u8; 64 * 1024];
        loop {
            if chunk.len() >= chunk_byte_len as usize {
                return Ok(true)
            }

            match self.read(&mut buffer).await {
                // EOF: report whether anything was accumulated
                Ok(0) => return Ok(!chunk.is_empty()),
                Ok(n) => {
                    chunk.extend_from_slice(&buffer[..n]);
                }
                Err(e) => return Err(e.into()),
            }
        }
    }
}
514
/// Reads a chain file (optionally gzip-compressed) chunk by chunk, so files larger than
/// memory can be imported incrementally.
#[derive(Debug)]
pub struct ChunkedFileReader {
    /// Underlying reader (plain file or gzip stream).
    file: FileReader,
    /// Buffered bytes: the leftover tail of a partially-decoded block from the previous
    /// chunk, refilled up to `chunk_byte_len` on each read.
    chunk: Vec<u8>,
    /// Target maximum byte length of each chunk.
    chunk_byte_len: u64,
    /// Highest block number decoded so far, threaded through receipt chunk decoding.
    highest_block: Option<u64>,
}
528
impl ChunkedFileReader {
    /// Opens the file at `path` and wraps it in a chunked reader.
    ///
    /// Gzip decompression is enabled when the file extension is `gz` or `gzip`.
    /// Falls back to [`DEFAULT_BYTE_LEN_CHUNK_CHAIN_FILE`] if no chunk length is given.
    pub async fn new<P: AsRef<Path>>(
        path: P,
        chunk_byte_len: Option<u64>,
    ) -> Result<Self, FileClientError> {
        let path = path.as_ref();
        let file = File::open(path).await?;
        let chunk_byte_len = chunk_byte_len.unwrap_or(DEFAULT_BYTE_LEN_CHUNK_CHAIN_FILE);

        Self::from_file(
            file,
            chunk_byte_len,
            // detect gzip from the file extension
            path.extension()
                .and_then(|ext| ext.to_str())
                .is_some_and(|ext| ["gz", "gzip"].contains(&ext)),
        )
        .await
    }

    /// Wraps an already-open [`File`] in a chunked reader.
    pub async fn from_file(
        file: File,
        chunk_byte_len: u64,
        is_gzip: bool,
    ) -> Result<Self, FileClientError> {
        let file_reader = if is_gzip {
            FileReader::Gzip(GzipDecoder::new(BufReader::new(file)))
        } else {
            // for plain files, the remaining byte budget is simply the file size
            let remaining_bytes = file.metadata().await?.len();
            FileReader::Plain { file, remaining_bytes }
        };

        Ok(Self { file: file_reader, chunk: vec![], chunk_byte_len, highest_block: None })
    }

    /// Reads the next chunk into `self.chunk`, appending after any leftover bytes.
    ///
    /// Returns the chunk's byte length, or `None` once the file is exhausted.
    async fn read_next_chunk(&mut self) -> Result<Option<u64>, FileClientError> {
        self.file.read_next_chunk(&mut self.chunk, self.chunk_byte_len).await
    }

    /// Decodes the next chunk of blocks into a [`FileClient`], validating with `consensus`.
    ///
    /// Returns `None` when the file is exhausted. Bytes of a trailing partial block are
    /// kept in the internal buffer and prepended to the following chunk.
    pub async fn next_chunk<B: FullBlock>(
        &mut self,
        consensus: Arc<dyn Consensus<B>>,
        parent_header: Option<SealedHeader<B::Header>>,
    ) -> Result<Option<FileClient<B>>, FileClientError> {
        let Some(chunk_len) = self.read_next_chunk().await? else { return Ok(None) };

        // decode a new file client from the buffered chunk
        let DecodedFileChunk { file_client, remaining_bytes, .. } =
            FileClientBuilder { consensus, parent_header }
                .build(&self.chunk[..], chunk_len)
                .await?;

        // retain the leftover bytes of the partially-decoded block at the chunk tail
        self.chunk = remaining_bytes;

        Ok(Some(file_client))
    }

    /// Decodes the next chunk of receipts via the [`FromReceiptReader`] implementor `T`.
    ///
    /// Read errors surface as `T::Error`: I/O errors pass through directly, any other
    /// [`FileClientError`] is wrapped in an [`io::Error`] first.
    pub async fn next_receipts_chunk<T>(&mut self) -> Result<Option<T>, T::Error>
    where
        T: FromReceiptReader,
    {
        let Some(next_chunk_byte_len) = self.read_next_chunk().await.map_err(|e| {
            T::Error::from(match e {
                FileClientError::Io(io_err) => io_err,
                _ => io::Error::other(e.to_string()),
            })
        })?
        else {
            return Ok(None)
        };

        // decode a new receipt file client from the buffered chunk
        let DecodedFileChunk { file_client, remaining_bytes, highest_block } =
            T::from_receipt_reader(&self.chunk[..], next_chunk_byte_len, self.highest_block)
                .await?;

        // carry over leftover bytes and decode progress for the next chunk
        self.chunk = remaining_bytes;
        self.highest_block = highest_block;

        Ok(Some(file_client))
    }
}
624
/// Abstraction over decoding one chunk of bytes from a reader into some output type.
pub trait FromReader {
    /// Error returned by [`FromReader::build`]; must be able to absorb I/O errors.
    type Error: From<io::Error>;

    /// The value decoded from the reader.
    type Output;

    /// Decodes `num_bytes` from `reader` into [`Self::Output`], returning any trailing
    /// undecodable bytes alongside the result.
    fn build<R>(
        &self,
        reader: R,
        num_bytes: u64,
    ) -> impl Future<Output = Result<DecodedFileChunk<Self::Output>, Self::Error>>
    where
        Self: Sized,
        R: AsyncReadExt + Unpin;
}
643
/// Output of decoding one chunk of a chain or receipts file.
#[derive(Debug)]
pub struct DecodedFileChunk<T> {
    /// The value decoded from this chunk.
    pub file_client: T,
    /// Bytes of a trailing, partially-decoded item, to be prepended to the next chunk.
    pub remaining_bytes: Vec<u8>,
    /// Highest block number seen while decoding, if the decoder tracks it.
    pub highest_block: Option<u64>,
}
655
#[cfg(test)]
mod tests {
    use super::*;
    use crate::{
        bodies::{
            bodies::BodiesDownloaderBuilder,
            test_utils::{insert_headers, zip_blocks},
        },
        headers::{reverse_headers::ReverseHeadersDownloaderBuilder, test_utils::child_header},
        test_utils::{generate_bodies, generate_bodies_file},
    };
    use assert_matches::assert_matches;
    use async_compression::tokio::write::GzipEncoder;
    use futures_util::stream::StreamExt;
    use rand::Rng;
    use reth_consensus::{noop::NoopConsensus, test_utils::TestConsensus};
    use reth_ethereum_primitives::Block;
    use reth_network_p2p::{
        bodies::downloader::BodyDownloader,
        headers::downloader::{HeaderDownloader, SyncTarget},
    };
    use reth_provider::test_utils::create_test_provider_factory;
    use std::sync::Arc;
    use tokio::{
        fs::File,
        io::{AsyncReadExt, AsyncSeekExt, AsyncWriteExt, SeekFrom},
    };

    // Bodies attached via `with_bodies` are served to the bodies downloader.
    #[tokio::test]
    async fn streams_bodies_from_buffer() {
        let factory = create_test_provider_factory();
        let (headers, mut bodies) = generate_bodies(0..=19);

        insert_headers(&factory, &headers);

        // an empty file: the client's bodies come from `with_bodies`, not from decoding
        let file = tempfile::tempfile().unwrap();

        let client: Arc<FileClient<Block>> = Arc::new(
            FileClient::from_file(file.into(), NoopConsensus::arc())
                .await
                .unwrap()
                .with_bodies(bodies.clone().into_iter().collect()),
        );
        let mut downloader = BodiesDownloaderBuilder::default().build::<Block, _, _>(
            client.clone(),
            Arc::new(TestConsensus::default()),
            factory,
        );
        downloader.set_download_range(0..=19).expect("failed to set download range");

        assert_matches!(
            downloader.next().await,
            Some(Ok(res)) => assert_eq!(res, zip_blocks(headers.iter(), &mut bodies))
        );
    }

    // Headers attached via `with_headers` are downloadable in reverse from the fork head.
    #[tokio::test]
    async fn download_headers_at_fork_head() {
        reth_tracing::init_test_tracing();

        // 4-header chain: p3 is oldest, each following header is the child of the previous
        let p3 = SealedHeader::default();
        let p2 = child_header(&p3);
        let p1 = child_header(&p2);
        let p0 = child_header(&p1);

        let file = tempfile::tempfile().unwrap();
        let client: Arc<FileClient<Block>> = Arc::new(
            FileClient::from_file(file.into(), NoopConsensus::arc()).await.unwrap().with_headers(
                HashMap::from([
                    (0u64, p0.clone_header()),
                    (1, p1.clone_header()),
                    (2, p2.clone_header()),
                    (3, p3.clone_header()),
                ]),
            ),
        );

        let mut downloader = ReverseHeadersDownloaderBuilder::default()
            .stream_batch_size(3)
            .request_limit(3)
            .build(Arc::clone(&client), Arc::new(TestConsensus::default()));
        downloader.update_local_head(p3.clone());
        downloader.update_sync_target(SyncTarget::Tip(p0.hash()));

        let headers = downloader.next().await.unwrap();
        assert_eq!(headers.unwrap(), vec![p0, p1, p2]);
        // the range is exhausted after the first batch
        assert!(downloader.next().await.is_none());
        assert!(downloader.next().await.is_none());
    }

    // Headers decoded from a generated chain file round-trip through the downloader.
    #[tokio::test]
    async fn test_download_headers_from_file() {
        reth_tracing::init_test_tracing();

        // generate random blocks and write them to a file, then read them back
        let (file, headers, _) = generate_bodies_file(0..=19).await;
        let client: Arc<FileClient<Block>> =
            Arc::new(FileClient::from_file(file, NoopConsensus::arc()).await.unwrap());

        // sync from the first header up to the last header's hash
        let mut header_downloader = ReverseHeadersDownloaderBuilder::default()
            .build(Arc::clone(&client), Arc::new(TestConsensus::default()));
        header_downloader.update_local_head(headers.first().unwrap().clone());
        header_downloader.update_sync_target(SyncTarget::Tip(headers.last().unwrap().hash()));

        let mut downloaded_headers = header_downloader.next().await.unwrap().unwrap();

        // results arrive in reverse; flip before comparing
        downloaded_headers.reverse();

        // the local head itself is not part of the response
        assert_eq!(downloaded_headers, headers[1..]);
    }

    // Bodies decoded from a generated chain file round-trip through the downloader.
    #[tokio::test]
    async fn test_download_bodies_from_file() {
        let factory = create_test_provider_factory();
        let (file, headers, mut bodies) = generate_bodies_file(0..=19).await;

        // read the blocks back from the file
        let client: Arc<FileClient<Block>> =
            Arc::new(FileClient::from_file(file, NoopConsensus::arc()).await.unwrap());

        insert_headers(&factory, &headers);

        let mut downloader = BodiesDownloaderBuilder::default().build::<Block, _, _>(
            client.clone(),
            Arc::new(TestConsensus::default()),
            factory,
        );
        downloader.set_download_range(0..=19).expect("failed to set download range");

        assert_matches!(
            downloader.next().await,
            Some(Ok(res)) => assert_eq!(res, zip_blocks(headers.iter(), &mut bodies))
        );
    }

    // Chunked reading of a plain chain file yields all headers across multiple chunks.
    #[tokio::test]
    async fn test_chunk_download_headers_from_file() {
        reth_tracing::init_test_tracing();

        let (file, headers, _) = generate_bodies_file(0..=14).await;

        // randomize the chunk size so block boundaries fall mid-chunk
        let chunk_byte_len = rand::rng().random_range(2000..=10_000);
        trace!(target: "downloaders::file::test", chunk_byte_len);

        let mut reader =
            ChunkedFileReader::from_file(file, chunk_byte_len as u64, false).await.unwrap();

        let mut downloaded_headers: Vec<SealedHeader> = vec![];

        let mut local_header = headers.first().unwrap().clone();

        // drain the file chunk by chunk, downloading each chunk's headers in turn
        while let Some(client) =
            reader.next_chunk::<Block>(NoopConsensus::arc(), None).await.unwrap()
        {
            let sync_target = client.tip_header().unwrap();

            let sync_target_hash = sync_target.hash();

            // sync the chunk: local head -> chunk tip
            let mut header_downloader = ReverseHeadersDownloaderBuilder::default()
                .build(Arc::new(client), Arc::new(TestConsensus::default()));
            header_downloader.update_local_head(local_header.clone());
            header_downloader.update_sync_target(SyncTarget::Tip(sync_target_hash));

            let mut downloaded_headers_chunk = header_downloader.next().await.unwrap().unwrap();

            // the chunk tip becomes the next chunk's local head
            local_header = sync_target;

            // results arrive in reverse; flip before accumulating
            downloaded_headers_chunk.reverse();
            downloaded_headers.extend_from_slice(&downloaded_headers_chunk);
        }

        // the initial local head is not part of the responses
        assert_eq!(headers[1..], downloaded_headers);
    }

    // Same as above but reading through the gzip decompression path.
    #[tokio::test]
    async fn test_chunk_download_headers_from_gzip_file() {
        reth_tracing::init_test_tracing();

        let (file, headers, _) = generate_bodies_file(0..=14).await;

        // reserve a temp path for the gzipped copy of the chain file
        let gzip_temp_file = tempfile::NamedTempFile::new().unwrap();
        let gzip_path = gzip_temp_file.path().to_owned();
        // read the original file back so it can be re-encoded as gzip
        drop(gzip_temp_file); let mut original_file = file;
        original_file.seek(SeekFrom::Start(0)).await.unwrap();
        let mut original_content = Vec::new();
        original_file.read_to_end(&mut original_content).await.unwrap();

        let mut gzip_file = File::create(&gzip_path).await.unwrap();
        let mut encoder = GzipEncoder::new(&mut gzip_file);

        // write the compressed copy and flush the encoder's trailer
        encoder.write_all(&original_content).await.unwrap();
        encoder.shutdown().await.unwrap();
        drop(gzip_file);

        let gzip_file = File::open(&gzip_path).await.unwrap();

        // randomize the chunk size so block boundaries fall mid-chunk
        let chunk_byte_len = rand::rng().random_range(2000..=10_000);
        trace!(target: "downloaders::file::test", chunk_byte_len);

        let mut reader =
            ChunkedFileReader::from_file(gzip_file, chunk_byte_len as u64, true).await.unwrap();

        let mut downloaded_headers: Vec<SealedHeader> = vec![];

        let mut local_header = headers.first().unwrap().clone();

        // drain the gzipped file chunk by chunk
        while let Some(client) =
            reader.next_chunk::<Block>(NoopConsensus::arc(), None).await.unwrap()
        {
            // a chunk may contain only the tail of a block and decode zero headers
            if client.headers_len() == 0 {
                continue;
            }

            let sync_target = client.tip_header().expect("tip_header should not be None");

            let sync_target_hash = sync_target.hash();

            // sync the chunk: local head -> chunk tip
            let mut header_downloader = ReverseHeadersDownloaderBuilder::default()
                .build(Arc::new(client), Arc::new(TestConsensus::default()));
            header_downloader.update_local_head(local_header.clone());
            header_downloader.update_sync_target(SyncTarget::Tip(sync_target_hash));

            let mut downloaded_headers_chunk = header_downloader.next().await.unwrap().unwrap();

            // the chunk tip becomes the next chunk's local head
            local_header = sync_target;

            // results arrive in reverse; flip before accumulating
            downloaded_headers_chunk.reverse();
            downloaded_headers.extend_from_slice(&downloaded_headers_chunk);
        }

        // the initial local head is not part of the responses
        assert_eq!(headers[1..], downloaded_headers);
    }
}