// reth_transaction_pool/maintain.rs

//! Support for maintaining the state of the transaction pool

use crate::{
    blobstore::{BlobSidecarConverter, BlobStoreCanonTracker, BlobStoreUpdates},
    error::PoolError,
    metrics::MaintainPoolMetrics,
    traits::{CanonicalStateUpdate, EthPoolTransaction, TransactionPool, TransactionPoolExt},
    AllPoolTransactions, BlobTransactionSidecarVariant, BlockInfo, PoolTransaction, PoolUpdateKind,
    TransactionOrigin,
};
use alloy_consensus::{transaction::TxHashRef, BlockHeader, Typed2718};
use alloy_eips::{BlockNumberOrTag, Decodable2718, Encodable2718};
use alloy_primitives::{
    map::{AddressSet, HashSet},
    Address, BlockHash, BlockNumber, Bytes,
};
use alloy_rlp::Encodable;
use futures_util::{
    future::{BoxFuture, Fuse, FusedFuture},
    FutureExt, Stream, StreamExt,
};
use reth_chain_state::CanonStateNotification;
use reth_chainspec::{ChainSpecProvider, EthChainSpec, EthereumHardforks};
use reth_execution_types::ChangedAccount;
use reth_fs_util::FsPathError;
use reth_primitives_traits::{
    transaction::signed::SignedTransaction, NodePrimitives, SealedHeader,
};
use reth_storage_api::{errors::provider::ProviderError, BlockReaderIdExt, StateProviderFactory};
use reth_tasks::Runtime;
use serde::{Deserialize, Serialize};
use std::{
    borrow::Borrow,
    hash::{Hash, Hasher},
    path::{Path, PathBuf},
    sync::Arc,
};
use tokio::{
    sync::oneshot,
    time::{self, Duration},
};
use tracing::{debug, error, info, trace, warn};

/// Maximum amount of time non-executable transactions are queued.
pub const MAX_QUEUED_TRANSACTION_LIFETIME: Duration = Duration::from_secs(3 * 60 * 60);

/// Additional settings for maintaining the transaction pool
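///
/// # Example
///
/// A minimal sketch of overriding individual settings (the values here are illustrative only):
///
/// ```ignore
/// let config = MaintainPoolConfig {
///     // evict stale external transactions after one hour instead of the default three
///     max_tx_lifetime: Duration::from_secs(60 * 60),
///     // treat locally submitted transactions like any other
///     no_local_exemptions: true,
///     ..Default::default()
/// };
/// ```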
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct MaintainPoolConfig {
    /// Maximum (reorg) depth we handle when updating the transaction pool: `new.number -
    /// last_seen.number`
    ///
    /// Default: 64 (2 epochs)
    pub max_update_depth: u64,
    /// Maximum number of accounts to reload from state at once when updating the transaction pool.
    ///
    /// Default: 100
    pub max_reload_accounts: usize,

    /// Maximum amount of time non-executable, non-local transactions are queued.
    /// Default: 3 hours
    pub max_tx_lifetime: Duration,

    /// Apply no exemptions to locally received transactions.
    ///
    /// This includes:
    ///   - no price exemptions
    ///   - no eviction exemptions
    pub no_local_exemptions: bool,
}

impl Default for MaintainPoolConfig {
    fn default() -> Self {
        Self {
            max_update_depth: 64,
            max_reload_accounts: 100,
            max_tx_lifetime: MAX_QUEUED_TRANSACTION_LIFETIME,
            no_local_exemptions: false,
        }
    }
}

/// Settings for the local transaction backup task
#[derive(Debug, Clone, Default)]
pub struct LocalTransactionBackupConfig {
    /// Path to the transactions backup file
    pub transactions_path: Option<PathBuf>,
}

impl LocalTransactionBackupConfig {
    /// Takes the path to the transactions backup file and returns the initialized config
92    pub const fn with_local_txs_backup(transactions_path: PathBuf) -> Self {
93        Self { transactions_path: Some(transactions_path) }
94    }
95}
96
97/// Returns a spawnable future for maintaining the state of the transaction pool.
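///
/// # Example
///
/// A usage sketch, assuming a suitable `client`, `pool`, canonical state notification stream
/// `events`, and a [`Runtime`] handle already exist (all names here are illustrative):
///
/// ```ignore
/// let fut = maintain_transaction_pool_future(
///     client.clone(),
///     pool.clone(),
///     events,
///     runtime.clone(),
///     MaintainPoolConfig::default(),
/// );
/// runtime.spawn_task(fut);
/// ```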
pub fn maintain_transaction_pool_future<N, Client, P, St>(
    client: Client,
    pool: P,
    events: St,
    task_spawner: Runtime,
    config: MaintainPoolConfig,
) -> BoxFuture<'static, ()>
where
    N: NodePrimitives,
    Client: StateProviderFactory
        + BlockReaderIdExt<Header = N::BlockHeader>
        + ChainSpecProvider<ChainSpec: EthChainSpec<Header = N::BlockHeader> + EthereumHardforks>
        + Clone
        + 'static,
    P: TransactionPoolExt<Transaction: PoolTransaction<Consensus = N::SignedTx>, Block = N::Block>
        + 'static,
    St: Stream<Item = CanonStateNotification<N>> + Send + Unpin + 'static,
{
    async move {
        maintain_transaction_pool(client, pool, events, task_spawner, config).await;
    }
    .boxed()
}

/// Maintains the state of the transaction pool by handling new blocks and reorgs.
///
/// This listens for any new blocks and reorgs and updates the transaction pool's state accordingly.
pub async fn maintain_transaction_pool<N, Client, P, St>(
    client: Client,
    pool: P,
    mut events: St,
    task_spawner: Runtime,
    config: MaintainPoolConfig,
) where
    N: NodePrimitives,
    Client: StateProviderFactory
        + BlockReaderIdExt<Header = N::BlockHeader>
        + ChainSpecProvider<ChainSpec: EthChainSpec<Header = N::BlockHeader> + EthereumHardforks>
        + Clone
        + 'static,
    P: TransactionPoolExt<Transaction: PoolTransaction<Consensus = N::SignedTx>, Block = N::Block>
        + 'static,
    St: Stream<Item = CanonStateNotification<N>> + Send + Unpin + 'static,
{
    let metrics = MaintainPoolMetrics::default();
    let MaintainPoolConfig { max_update_depth, max_reload_accounts, .. } = config;
    // ensure the pool points to the latest state
    if let Ok(Some(latest)) = client.header_by_number_or_tag(BlockNumberOrTag::Latest) {
        let latest = SealedHeader::seal_slow(latest);
        let chain_spec = client.chain_spec();
        let info = BlockInfo {
            block_gas_limit: latest.gas_limit(),
            last_seen_block_hash: latest.hash(),
            last_seen_block_number: latest.number(),
            pending_basefee: chain_spec
                .next_block_base_fee(latest.header(), latest.timestamp())
                .unwrap_or_default(),
            pending_blob_fee: latest
                .maybe_next_block_blob_fee(chain_spec.blob_params_at_timestamp(latest.timestamp())),
        };
        pool.set_block_info(info);
    }

    // keeps track of mined blob transactions so we can clean up finalized transactions
    let mut blob_store_tracker = BlobStoreCanonTracker::default();

    // keeps track of the latest finalized block
    let mut last_finalized_block =
        FinalizedBlockTracker::new(client.finalized_block_number().ok().flatten());

    // keeps track of any dirty accounts that we know are out of sync with the pool
    let mut dirty_addresses = HashSet::default();

    // keeps track of the state of the pool with respect to blocks
    let mut maintained_state = MaintainedPoolState::InSync;

    // the future that reloads accounts from state
    let mut reload_accounts_fut = Fuse::terminated();

    // eviction interval for stale non-local txs
    let mut stale_eviction_interval = time::interval(config.max_tx_lifetime);

    // toggle for the first notification
    let mut first_event = true;

    // The update loop that waits for new blocks and reorgs and performs pool updates.
    // Listen for new chain events and derive the update action for the pool.
    loop {
        trace!(target: "txpool", state=?maintained_state, "awaiting new block or reorg");

        metrics.set_dirty_accounts_len(dirty_addresses.len());
        let pool_info = pool.block_info();

        // after performing a pool update for a new block, we have some time to properly update
        // dirty accounts and correct any drift from the current state, for example after a
        // restart or a pipeline run
        if maintained_state.is_drifted() {
            metrics.inc_drift();
            // assuming all senders are dirty
            dirty_addresses = pool.unique_senders();
            // make sure we toggle the state back to in sync
            maintained_state = MaintainedPoolState::InSync;
        }

        // if we have accounts that are out of sync with the pool, we reload them in chunks
        if !dirty_addresses.is_empty() && reload_accounts_fut.is_terminated() {
            let (tx, rx) = oneshot::channel();
            let c = client.clone();
            let at = pool_info.last_seen_block_hash;
            let fut = if dirty_addresses.len() > max_reload_accounts {
                // need to chunk accounts to reload
                let accs_to_reload =
                    dirty_addresses.iter().copied().take(max_reload_accounts).collect::<Vec<_>>();
                for acc in &accs_to_reload {
                    // make sure we remove them from the dirty set
                    dirty_addresses.remove(acc);
                }
                async move {
                    let res = load_accounts(c, at, accs_to_reload);
                    let _ = tx.send(res);
                }
                .boxed()
            } else {
                // can fetch all dirty accounts at once
                let accs_to_reload = std::mem::take(&mut dirty_addresses);
                async move {
                    let res = load_accounts(c, at, accs_to_reload);
                    let _ = tx.send(res);
                }
                .boxed()
            };
            reload_accounts_fut = rx.fuse();
            task_spawner.spawn_blocking_task(fut);
        }

        // check if we have a new finalized block
        if let Some(finalized) =
            last_finalized_block.update(client.finalized_block_number().ok().flatten()) &&
            let BlobStoreUpdates::Finalized(blobs) =
                blob_store_tracker.on_finalized_block(finalized)
        {
            metrics.inc_deleted_tracked_blobs(blobs.len());
            // remove all finalized blobs from the blob store
            pool.delete_blobs(blobs);
            // and also do periodic cleanup
            let pool = pool.clone();
            task_spawner.spawn_blocking_task(async move {
                debug!(target: "txpool", finalized_block = %finalized, "cleaning up blob store");
                pool.cleanup_blobs();
            });
        }

        // outcomes of the futures we are waiting on
        let mut event = None;
        let mut reloaded = None;

        // select over account reloads and new canonical state updates, which should arrive at
        // the rate of the block time
        tokio::select! {
            res = &mut reload_accounts_fut => {
                reloaded = Some(res);
            }
            ev = events.next() => {
                if ev.is_none() {
                    // the stream ended, we are done
                    break;
                }
                event = ev;
                // on receiving the first event on start-up, mark the pool as drifted to explicitly
                // trigger revalidation and clear out outdated txs.
                if first_event {
                    maintained_state = MaintainedPoolState::Drifted;
                    first_event = false
                }
            }
            _ = stale_eviction_interval.tick() => {
                let queued = pool.queued_transactions();
                let mut stale_blobs = Vec::new();
                let now = std::time::Instant::now();
                let stale_txs: Vec<_> = queued
                    .into_iter()
                    .filter(|tx| {
                        // filter stale transactions based on config
                        (tx.origin.is_external() || config.no_local_exemptions) && now - tx.timestamp > config.max_tx_lifetime
                    })
                    .map(|tx| {
                        if tx.is_eip4844() {
                            stale_blobs.push(*tx.hash());
                        }
                        *tx.hash()
                    })
                    .collect();
                debug!(target: "txpool", count=%stale_txs.len(), "removing stale transactions");
                pool.remove_transactions(stale_txs);
                pool.delete_blobs(stale_blobs);
            }
        }
        // handle the result of the account reload
        match reloaded {
            Some(Ok(Ok(LoadedAccounts { accounts, failed_to_load }))) => {
                // reloaded accounts successfully
                // extend accounts we failed to load from database
                dirty_addresses.extend(failed_to_load);
                // update the pool with the loaded accounts
                pool.update_accounts(accounts);
            }
            Some(Ok(Err(res))) => {
                // Failed to load accounts from state
                let (accs, err) = *res;
                debug!(target: "txpool", %err, "failed to load accounts");
                dirty_addresses.extend(accs);
            }
            Some(Err(_)) => {
                // failed to receive the accounts, sender dropped, only possible if task panicked
                maintained_state = MaintainedPoolState::Drifted;
            }
            None => {}
        }

        // handle the new block or reorg
        let Some(event) = event else { continue };
        match event {
            CanonStateNotification::Reorg { old, new } => {
                let (old_blocks, old_state) = old.inner();
                let (new_blocks, new_state) = new.inner();
                let new_tip = new_blocks.tip();
                let new_first = new_blocks.first();
                let old_first = old_blocks.first();

                // check if the reorg is not canonical with the pool's block
                if !(old_first.parent_hash() == pool_info.last_seen_block_hash ||
                    new_first.parent_hash() == pool_info.last_seen_block_hash)
                {
                    // neither chain connects to the pool's last seen block, so the pool has drifted
                    maintained_state = MaintainedPoolState::Drifted;
                }

                let chain_spec = client.chain_spec();

                // fees for the next block: `new_tip+1`
                let pending_block_base_fee = chain_spec
                    .next_block_base_fee(new_tip.header(), new_tip.timestamp())
                    .unwrap_or_default();
                let pending_block_blob_fee = new_tip.header().maybe_next_block_blob_fee(
                    chain_spec.blob_params_at_timestamp(new_tip.timestamp()),
                );

                // we know all changed accounts in the new chain
                let new_changed_accounts: HashSet<_> =
                    new_state.changed_accounts().map(ChangedAccountEntry).collect();

                // find all accounts that were changed in the old chain but _not_ in the new chain
                let missing_changed_acc = old_state
                    .accounts_iter()
                    .map(|(a, _)| a)
                    .filter(|addr| !new_changed_accounts.contains(addr));

                // for these we need to fetch the nonce+balance from the db at the new tip
                let mut changed_accounts =
                    match load_accounts(client.clone(), new_tip.hash(), missing_changed_acc) {
                        Ok(LoadedAccounts { accounts, failed_to_load }) => {
                            // extend accounts we failed to load from database
                            dirty_addresses.extend(failed_to_load);

                            accounts
                        }
                        Err(err) => {
                            let (addresses, err) = *err;
                            debug!(
                                target: "txpool",
                                %err,
                                "failed to load missing changed accounts at new tip: {:?}",
                                new_tip.hash()
                            );
                            dirty_addresses.extend(addresses);
                            vec![]
                        }
                    };

                // also include all accounts from the new chain
                // we can use extend here because they are unique
                changed_accounts.extend(new_changed_accounts.into_iter().map(|entry| entry.0));

                // all transactions mined in the new chain
                let new_mined_transactions: HashSet<_> = new_blocks.transaction_hashes().collect();

                // update the pool, then re-inject the pruned transactions
                // find all transactions that were mined in the old chain but not in the new chain
                let pruned_old_transactions = old_blocks
                    .transactions_ecrecovered()
                    .filter(|tx| !new_mined_transactions.contains(tx.tx_hash()))
                    .filter_map(|tx| {
                        if tx.is_eip4844() {
                            // reorged blob transactions no longer include their sidecar, which is
                            // required for validating the transaction. Even though the transaction
                            // may have been validated previously, we still need the blob to
                            // accurately set the transaction's encoded length, which is propagated
                            // over the network.
                            pool.get_blob(*tx.tx_hash())
                                .ok()
                                .flatten()
                                .map(Arc::unwrap_or_clone)
                                .and_then(|sidecar| {
                                    <P as TransactionPool>::Transaction::try_from_eip4844(
                                        tx, sidecar,
                                    )
                                })
                        } else {
                            <P as TransactionPool>::Transaction::try_from_consensus(tx).ok()
                        }
                    })
                    .collect::<Vec<_>>();

                // update the pool first
                let update = CanonicalStateUpdate {
                    new_tip: new_tip.sealed_block(),
                    pending_block_base_fee,
                    pending_block_blob_fee,
                    changed_accounts,
                    // all transactions mined in the new chain need to be removed from the pool
                    mined_transactions: new_blocks.transaction_hashes().collect(),
                    update_kind: PoolUpdateKind::Reorg,
                };
                pool.on_canonical_state_change(update);

                // all transactions that were mined in the old chain but not in the new chain need
                // to be re-injected
                //
                // Note: we no longer know if the tx was local or external
                // Because the transactions are not finalized, the corresponding blobs are still in
                // the blob store (if we previously received them from the network)
                metrics.inc_reinserted_transactions(pruned_old_transactions.len());
                let _ = pool.add_external_transactions(pruned_old_transactions).await;

                // keep track of newly mined blob transactions
                blob_store_tracker.add_new_chain_blocks(&new_blocks);
            }
            CanonStateNotification::Commit { new } => {
                let (blocks, state) = new.inner();
                let tip = blocks.tip();
                let chain_spec = client.chain_spec();

                // fees for the next block: `tip+1`
                let pending_block_base_fee = chain_spec
                    .next_block_base_fee(tip.header(), tip.timestamp())
                    .unwrap_or_default();
                let pending_block_blob_fee = tip.header().maybe_next_block_blob_fee(
                    chain_spec.blob_params_at_timestamp(tip.timestamp()),
                );

                let first_block = blocks.first();
                trace!(
                    target: "txpool",
                    first = first_block.number(),
                    tip = tip.number(),
                    pool_block = pool_info.last_seen_block_number,
                    "update pool on new commit"
                );

                // check if the depth is too large and should be skipped; this could happen after an
                // initial sync or a long re-sync
                let depth = tip.number().abs_diff(pool_info.last_seen_block_number);
                if depth > max_update_depth {
                    maintained_state = MaintainedPoolState::Drifted;
                    debug!(target: "txpool", ?depth, "skipping deep canonical update");
                    let info = BlockInfo {
                        block_gas_limit: tip.header().gas_limit(),
                        last_seen_block_hash: tip.hash(),
                        last_seen_block_number: tip.number(),
                        pending_basefee: pending_block_base_fee,
                        pending_blob_fee: pending_block_blob_fee,
                    };
                    pool.set_block_info(info);

                    // keep track of mined blob transactions
                    blob_store_tracker.add_new_chain_blocks(&blocks);

                    continue
                }

                let mut changed_accounts = Vec::with_capacity(state.state().len());
                for acc in state.changed_accounts() {
                    // we can always clear the dirty flag for this account
                    dirty_addresses.remove(&acc.address);
                    changed_accounts.push(acc);
                }

                let mined_transactions = blocks.transaction_hashes().collect();

                // check if the range of the commit is canonical with the pool's block
                if first_block.parent_hash() != pool_info.last_seen_block_hash {
                    // we received a new canonical chain commit but the commit is not canonical with
                    // the pool's block; this could happen after an initial sync or a long re-sync
                    maintained_state = MaintainedPoolState::Drifted;
                }

                // Canonical update
                let update = CanonicalStateUpdate {
                    new_tip: tip.sealed_block(),
                    pending_block_base_fee,
                    pending_block_blob_fee,
                    changed_accounts,
                    mined_transactions,
                    update_kind: PoolUpdateKind::Commit,
                };
                pool.on_canonical_state_change(update);

                // keep track of mined blob transactions
                blob_store_tracker.add_new_chain_blocks(&blocks);

                // If Osaka activates in 2 slots we need to convert blobs to the new format.
                if !chain_spec.is_osaka_active_at_timestamp(tip.timestamp()) &&
                    !chain_spec.is_osaka_active_at_timestamp(tip.timestamp().saturating_add(12)) &&
                    chain_spec.is_osaka_active_at_timestamp(tip.timestamp().saturating_add(24))
                {
                    let pool = pool.clone();
                    let spawner = task_spawner.clone();
                    let client = client.clone();
                    task_spawner.spawn_task(async move {
                        // Start converting no earlier than 4 seconds into the current slot to
                        // ensure that our pool only contains valid transactions for the next block
                        // (as it's not Osaka yet).
                        tokio::time::sleep(Duration::from_secs(4)).await;

                        let mut interval = tokio::time::interval(Duration::from_secs(1));
                        loop {
                            // Loop and replace blob transactions until we reach the Osaka
                            // transition block, after which legacy blobs are no longer accepted.
                            let last_iteration =
                                client.latest_header().ok().flatten().is_none_or(|header| {
                                    client
                                        .chain_spec()
                                        .is_osaka_active_at_timestamp(header.timestamp())
                                });

                            let AllPoolTransactions { pending, queued } = pool.all_transactions();
                            for tx in pending.into_iter().chain(queued).filter(|tx| tx.is_eip4844())
                            {
                                let tx_hash = *tx.hash();

                                // Fetch the sidecar from the pool
                                let Ok(Some(sidecar)) = pool.get_blob(tx_hash) else {
                                    continue;
                                };
                                // Ensure it is a legacy (EIP-4844) sidecar
                                if !sidecar.is_eip4844() {
                                    continue;
                                }
                                // Remove the transaction and sidecar from the pool; both are held
                                // in memory now
                                let Some(tx) = pool.remove_transactions(vec![tx_hash]).pop() else {
                                    continue;
                                };
                                pool.delete_blob(tx_hash);

                                let BlobTransactionSidecarVariant::Eip4844(sidecar) =
                                    Arc::unwrap_or_clone(sidecar)
                                else {
                                    continue;
                                };

                                let converter = BlobSidecarConverter::new();
                                let pool = pool.clone();
                                spawner.spawn_task(async move {
                                    // Convert the sidecar to the EIP-7594 format
                                    let Some(sidecar) = converter.convert(sidecar).await else {
                                        return;
                                    };

                                    // Re-insert the transaction with the new sidecar
                                    let origin = tx.origin;
                                    let Some(tx) = EthPoolTransaction::try_from_eip4844(
                                        tx.to_consensus(),
                                        sidecar.into(),
                                    ) else {
                                        return;
                                    };
                                    let _ = pool.add_transaction(origin, tx).await;
                                });
                            }

                            if last_iteration {
                                break;
                            }

                            interval.tick().await;
                        }
                    });
                }
            }
        }
    }
}

struct FinalizedBlockTracker {
    last_finalized_block: Option<BlockNumber>,
}

impl FinalizedBlockTracker {
    const fn new(last_finalized_block: Option<BlockNumber>) -> Self {
        Self { last_finalized_block }
    }

    /// Updates the tracked finalized block and returns the new finalized block if it changed
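    ///
    /// A sketch of the forward-only behavior (mirrors the unit tests below):
    ///
    /// ```ignore
    /// let mut tracker = FinalizedBlockTracker::new(Some(10));
    /// assert_eq!(tracker.update(Some(15)), Some(15)); // advanced
    /// assert_eq!(tracker.update(Some(12)), None);     // never moves backwards
    /// ```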
    fn update(&mut self, finalized_block: Option<BlockNumber>) -> Option<BlockNumber> {
        let finalized = finalized_block?;
        self.last_finalized_block.is_none_or(|last| last < finalized).then(|| {
            self.last_finalized_block = Some(finalized);
            finalized
        })
    }
}

/// Keeps track of the pool's state, i.e. whether the accounts in the pool are in sync with the
/// actual state.
#[derive(Debug, PartialEq, Eq)]
enum MaintainedPoolState {
    /// Pool is assumed to be in sync with the current state
    InSync,
    /// Pool could be out of sync with the state
    Drifted,
}

impl MaintainedPoolState {
    /// Returns `true` if the pool is assumed to be out of sync with the current state.
    #[inline]
    const fn is_drifted(&self) -> bool {
        matches!(self, Self::Drifted)
    }
}

/// A unique [`ChangedAccount`] identified by its address that can be used for deduplication
#[derive(Eq)]
struct ChangedAccountEntry(ChangedAccount);

impl PartialEq for ChangedAccountEntry {
    fn eq(&self, other: &Self) -> bool {
        self.0.address == other.0.address
    }
}

impl Hash for ChangedAccountEntry {
    fn hash<H: Hasher>(&self, state: &mut H) {
        self.0.address.hash(state);
    }
}

impl Borrow<Address> for ChangedAccountEntry {
    fn borrow(&self) -> &Address {
        &self.0.address
    }
}

#[derive(Default)]
struct LoadedAccounts {
    /// All accounts that were loaded
    accounts: Vec<ChangedAccount>,
    /// All accounts that failed to load
    failed_to_load: Vec<Address>,
}

/// Loads all accounts at the given state
///
/// Returns an error with all given addresses if the state is not available.
///
/// Note: this expects _unique_ addresses
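///
/// # Example
///
/// A sketch of how the maintenance loop consumes the result (`client`, `at` and the dirty
/// address set are illustrative placeholders):
///
/// ```ignore
/// match load_accounts(client.clone(), at, std::mem::take(&mut dirty_addresses)) {
///     Ok(LoadedAccounts { accounts, failed_to_load }) => {
///         // retry the failures later and push the fresh state into the pool
///         dirty_addresses.extend(failed_to_load);
///         pool.update_accounts(accounts);
///     }
///     Err(boxed) => {
///         // state unavailable: mark all addresses dirty again
///         let (addresses, _err) = *boxed;
///         dirty_addresses.extend(addresses);
///     }
/// }
/// ```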
fn load_accounts<Client, I>(
    client: Client,
    at: BlockHash,
    addresses: I,
) -> Result<LoadedAccounts, Box<(AddressSet, ProviderError)>>
where
    I: IntoIterator<Item = Address>,
    Client: StateProviderFactory,
{
    let addresses = addresses.into_iter();
    let mut res = LoadedAccounts::default();
    let state = match client.history_by_block_hash(at) {
        Ok(state) => state,
        Err(err) => return Err(Box::new((addresses.collect(), err))),
    };
    for addr in addresses {
        if let Ok(maybe_acc) = state.basic_account(&addr) {
            let acc = maybe_acc
                .map(|acc| ChangedAccount { address: addr, nonce: acc.nonce, balance: acc.balance })
                .unwrap_or_else(|| ChangedAccount::empty(addr));
            res.accounts.push(acc)
        } else {
            // failed to load account.
            res.failed_to_load.push(addr);
        }
    }
    Ok(res)
}

/// Loads transactions from a file, decodes them from the JSON or RLP format, and
/// inserts them into the transaction pool on node boot up.
/// The file is removed after the transactions have been successfully processed.
async fn load_and_reinsert_transactions<P>(
    pool: P,
    file_path: &Path,
) -> Result<(), TransactionsBackupError>
where
    P: TransactionPool<Transaction: PoolTransaction<Consensus: SignedTransaction>>,
{
    if !file_path.exists() {
        return Ok(())
    }

    debug!(target: "txpool", txs_file =?file_path, "Check local persistent storage for saved transactions");
    let data = reth_fs_util::read(file_path)?;

    if data.is_empty() {
        return Ok(())
    }

    let pool_transactions: Vec<(TransactionOrigin, <P as TransactionPool>::Transaction)> =
        if let Ok(tx_backups) = serde_json::from_slice::<Vec<TxBackup>>(&data) {
            tx_backups
                .into_iter()
                .filter_map(|backup| {
                    let tx_signed =
                        <P::Transaction as PoolTransaction>::Consensus::decode_2718_exact(
                            backup.rlp.as_ref(),
                        )
                        .ok()?;
                    let recovered = tx_signed.try_into_recovered().ok()?;
                    let pool_tx =
                        <P::Transaction as PoolTransaction>::try_from_consensus(recovered).ok()?;

                    Some((backup.origin, pool_tx))
                })
                .collect()
        } else {
            let txs_signed: Vec<<P::Transaction as PoolTransaction>::Consensus> =
                alloy_rlp::Decodable::decode(&mut data.as_slice())?;

            txs_signed
                .into_iter()
                .filter_map(|tx| tx.try_into_recovered().ok())
                .filter_map(|tx| {
                    <P::Transaction as PoolTransaction>::try_from_consensus(tx)
                        .ok()
                        .map(|pool_tx| (TransactionOrigin::Local, pool_tx))
                })
                .collect()
        };

    let inserted = futures_util::future::join_all(
        pool_transactions.into_iter().map(|(origin, tx)| pool.add_transaction(origin, tx)),
    )
    .await;

    info!(target: "txpool", txs_file =?file_path, num_txs=%inserted.len(), "Successfully reinserted local transactions from file");
    reth_fs_util::remove_file(file_path)?;
    Ok(())
}

fn save_local_txs_backup<P>(pool: P, file_path: &Path)
where
    P: TransactionPool<Transaction: PoolTransaction<Consensus: Encodable>>,
{
    let local_transactions = pool.get_local_transactions();
    if local_transactions.is_empty() {
        trace!(target: "txpool", "no local transactions to save");
        return
    }

    let local_transactions = local_transactions
        .into_iter()
        .map(|tx| {
            let consensus_tx = tx.to_consensus().into_inner();
            let rlp_data = consensus_tx.encoded_2718();

            TxBackup { rlp: rlp_data.into(), origin: tx.origin }
        })
        .collect::<Vec<_>>();

    let json_data = match serde_json::to_string(&local_transactions) {
        Ok(data) => data,
        Err(err) => {
            warn!(target: "txpool", %err, txs_file=?file_path, "failed to serialize local transactions to json");
            return
        }
    };

    info!(target: "txpool", txs_file =?file_path, num_txs=%local_transactions.len(), "Saving current local transactions");
    let parent_dir = file_path.parent().map(std::fs::create_dir_all).transpose();

    match parent_dir.map(|_| reth_fs_util::write(file_path, json_data)) {
        Ok(_) => {
            info!(target: "txpool", txs_file=?file_path, "Wrote local transactions to file");
        }
        Err(err) => {
            warn!(target: "txpool", %err, txs_file=?file_path, "Failed to write local transactions to file");
        }
    }
}

/// A transaction backup that is saved as JSON to a file for
/// reinsertion into the pool
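///
/// A sketch of one serialized entry (the RLP bytes are truncated and illustrative; the exact
/// field encodings follow the serde derives of [`Bytes`] and [`TransactionOrigin`]):
///
/// ```text
/// { "rlp": "0x02f872…", "origin": "Local" }
/// ```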
#[derive(Debug, Deserialize, Serialize)]
pub struct TxBackup {
    /// The EIP-2718 encoded transaction
    pub rlp: Bytes,
    /// The origin of the transaction
    pub origin: TransactionOrigin,
}

/// Possible errors during transaction backup load and decode
#[derive(thiserror::Error, Debug)]
pub enum TransactionsBackupError {
    /// Error during RLP decoding of transactions
    #[error("failed to apply transactions backup. Encountered RLP decode error: {0}")]
    Decode(#[from] alloy_rlp::Error),
    /// Error during JSON decoding of transactions
    #[error("failed to apply transactions backup. Encountered JSON decode error: {0}")]
    Json(#[from] serde_json::Error),
    /// Error during a filesystem operation
    #[error("failed to apply transactions backup. Encountered file error: {0}")]
    FsPath(#[from] FsPathError),
    /// Error adding transactions to the transaction pool
    #[error("failed to insert transactions to the transactions pool. Encountered pool error: {0}")]
    Pool(#[from] PoolError),
}

/// Task which manages saving local transactions to a persistent file in case of shutdown.
/// Reloads the transactions from the file on boot-up and inserts them into the pool.
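///
/// # Example
///
/// A wiring sketch (mirrors the test below; `runtime`, `pool` and `config` are illustrative):
///
/// ```ignore
/// runtime.spawn_critical_with_graceful_shutdown_signal("local transactions backup task", |shutdown| {
///     backup_local_transactions_task(shutdown, pool.clone(), config)
/// });
/// ```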
pub async fn backup_local_transactions_task<P>(
    shutdown: reth_tasks::shutdown::GracefulShutdown,
    pool: P,
    config: LocalTransactionBackupConfig,
) where
    P: TransactionPool<Transaction: PoolTransaction<Consensus: SignedTransaction>> + Clone,
{
    let Some(transactions_path) = config.transactions_path else {
        // nothing to do
        return
    };

    if let Err(err) = load_and_reinsert_transactions(pool.clone(), &transactions_path).await {
        error!(target: "txpool", "{}", err)
    }

    let graceful_guard = shutdown.await;

    // write transactions to disk
    save_local_txs_backup(pool, &transactions_path);

    drop(graceful_guard)
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::{
        blobstore::InMemoryBlobStore, validate::EthTransactionValidatorBuilder,
        CoinbaseTipOrdering, EthPooledTransaction, Pool, TransactionOrigin,
    };
    use alloy_eips::eip2718::Decodable2718;
    use alloy_primitives::{hex, U256};
    use reth_ethereum_primitives::PooledTransactionVariant;
    use reth_evm_ethereum::EthEvmConfig;
    use reth_fs_util as fs;
    use reth_provider::test_utils::{ExtendedAccount, MockEthProvider};
    use reth_tasks::Runtime;

    #[test]
    fn changed_acc_entry() {
        let changed_acc = ChangedAccountEntry(ChangedAccount::empty(Address::random()));
        let mut copy = changed_acc.0;
        copy.nonce = 10;
        assert!(changed_acc.eq(&ChangedAccountEntry(copy)));
    }
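
    // Exercises the `Borrow<Address>` impl: a set of entries can be queried and deduplicated
    // by address alone, which the reorg handling relies on.
    #[test]
    fn changed_acc_entry_borrow_lookup() {
        let address = Address::random();
        let mut set = HashSet::default();
        assert!(set.insert(ChangedAccountEntry(ChangedAccount::empty(address))));
        // lookup by address only, without constructing another entry
        assert!(set.contains(&address));
        // a second entry for the same address is treated as a duplicate
        assert!(!set.insert(ChangedAccountEntry(ChangedAccount {
            address,
            nonce: 1,
            balance: U256::ZERO,
        })));
    }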

    const EXTENSION: &str = "json";
    const FILENAME: &str = "test_transactions_backup";

    #[tokio::test(flavor = "multi_thread")]
    async fn test_save_local_txs_backup() {
        let temp_dir = tempfile::tempdir().unwrap();
        let transactions_path = temp_dir.path().join(FILENAME).with_extension(EXTENSION);
        let tx_bytes = hex!(
            "02f87201830655c2808505ef61f08482565f94388c818ca8b9251b393131c08a736a67ccb192978801049e39c4b5b1f580c001a01764ace353514e8abdfb92446de356b260e3c1225b73fc4c8876a6258d12a129a04f02294aa61ca7676061cd99f29275491218b4754b46a0248e5e42bc5091f507"
        );
        let tx = PooledTransactionVariant::decode_2718(&mut &tx_bytes[..]).unwrap();
        let provider = MockEthProvider::default().with_genesis_block();
        let transaction = EthPooledTransaction::from_pooled(tx.try_into_recovered().unwrap());
        let tx_to_cmp = transaction.clone();
        let sender = hex!("1f9090aaE28b8a3dCeaDf281B0F12828e676c326").into();
        provider.add_account(sender, ExtendedAccount::new(42, U256::MAX));
        let blob_store = InMemoryBlobStore::default();
        let validator = EthTransactionValidatorBuilder::new(provider, EthEvmConfig::mainnet())
            .build(blob_store.clone());

        let txpool = Pool::new(
            validator,
            CoinbaseTipOrdering::default(),
            blob_store.clone(),
            Default::default(),
        );

        txpool.add_transaction(TransactionOrigin::Local, transaction.clone()).await.unwrap();

        let rt = Runtime::test();
        let config = LocalTransactionBackupConfig::with_local_txs_backup(transactions_path.clone());
        rt.spawn_critical_with_graceful_shutdown_signal("test task", |shutdown| {
            backup_local_transactions_task(shutdown, txpool.clone(), config)
        });

        let mut txns = txpool.get_local_transactions();
        let tx_on_finish = txns.pop().expect("there should be 1 transaction");

        assert_eq!(*tx_to_cmp.hash(), *tx_on_finish.hash());

        rt.graceful_shutdown();

        let data = fs::read(transactions_path).unwrap();

        let txs: Vec<TxBackup> = serde_json::from_slice::<Vec<TxBackup>>(&data).unwrap();
        assert_eq!(txs.len(), 1);

        temp_dir.close().unwrap();
    }

    #[test]
    fn test_update_with_higher_finalized_block() {
        let mut tracker = FinalizedBlockTracker::new(Some(10));
        assert_eq!(tracker.update(Some(15)), Some(15));
        assert_eq!(tracker.last_finalized_block, Some(15));
    }

    #[test]
    fn test_update_with_lower_finalized_block() {
        let mut tracker = FinalizedBlockTracker::new(Some(20));
        assert_eq!(tracker.update(Some(15)), None);
        // finalized block should NOT go backwards
        assert_eq!(tracker.last_finalized_block, Some(20));
    }

    #[test]
    fn test_update_with_equal_finalized_block() {
        let mut tracker = FinalizedBlockTracker::new(Some(20));
        assert_eq!(tracker.update(Some(20)), None);
        assert_eq!(tracker.last_finalized_block, Some(20));
    }

    #[test]
    fn test_update_with_no_last_finalized_block() {
        let mut tracker = FinalizedBlockTracker::new(None);
        assert_eq!(tracker.update(Some(10)), Some(10));
        assert_eq!(tracker.last_finalized_block, Some(10));
    }

    #[test]
    fn test_update_with_no_new_finalized_block() {
        let mut tracker = FinalizedBlockTracker::new(Some(10));
        assert_eq!(tracker.update(None), None);
        assert_eq!(tracker.last_finalized_block, Some(10));
    }

    #[test]
    fn test_update_with_no_finalized_blocks() {
        let mut tracker = FinalizedBlockTracker::new(None);
        assert_eq!(tracker.update(None), None);
        assert_eq!(tracker.last_finalized_block, None);
    }
}