From 2493b57140beef80703451fe3fe6cd35631d3f49 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Thu, 30 Apr 2026 15:33:44 +0200 Subject: [PATCH 1/6] lightning: introduce singular claim requests Have ChannelMonitor hand singular ClaimRequests to OnchainTxHandler. Convert them to PackageTemplates only after duplicate filtering. This makes the single-outpoint invariant explicit at that boundary. --- lightning/src/chain/channelmonitor.rs | 53 ++++++++++++++------------- lightning/src/chain/onchaintx.rs | 31 ++++++---------- lightning/src/chain/package.rs | 42 +++++++++++++++++++++ 3 files changed, 81 insertions(+), 45 deletions(-) diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs index c3e20ef5e6f..2442f9c6af1 100644 --- a/lightning/src/chain/channelmonitor.rs +++ b/lightning/src/chain/channelmonitor.rs @@ -38,8 +38,8 @@ use crate::chain::chaininterface::{ }; use crate::chain::onchaintx::{ClaimEvent, FeerateStrategy, OnchainTxHandler}; use crate::chain::package::{ - CounterpartyOfferedHTLCOutput, CounterpartyReceivedHTLCOutput, HolderFundingOutput, - HolderHTLCOutput, PackageSolvingData, PackageTemplate, RevokedHTLCOutput, RevokedOutput, + ClaimRequest, CounterpartyOfferedHTLCOutput, CounterpartyReceivedHTLCOutput, + HolderFundingOutput, HolderHTLCOutput, PackageSolvingData, RevokedHTLCOutput, RevokedOutput, }; use crate::chain::transaction::{OutPoint, TransactionData}; use crate::chain::{BlockLocator, WatchedOutput}; @@ -3861,7 +3861,7 @@ impl ChannelMonitorImpl { fn generate_claimable_outpoints_and_watch_outputs( &mut self, generate_monitor_event_with_reason: Option, require_funding_seen: bool, - ) -> (Vec, Vec) { + ) -> (Vec, Vec) { let funding = get_confirmed_funding_scope!(self); let holder_commitment_tx = &funding.current_holder_commitment_tx; let funding_outp = HolderFundingOutput::build( @@ -3869,7 +3869,7 @@ impl ChannelMonitorImpl { funding.channel_parameters.clone(), ); let funding_outpoint = 
funding.funding_outpoint(); - let commitment_package = PackageTemplate::build_package( + let commitment_package = ClaimRequest::new( funding_outpoint.txid.clone(), funding_outpoint.index as u32, PackageSolvingData::HolderFundingOutput(funding_outp), self.best_block.height, @@ -3908,9 +3908,9 @@ impl ChannelMonitorImpl { let zero_fee_commitments = self.channel_type_features().supports_anchor_zero_fee_commitments(); if !zero_fee_htlcs && !zero_fee_commitments { - // Because we're broadcasting a commitment transaction, we should construct the package - // assuming it gets confirmed in the next block. Sadly, we have code which considers - // "not yet confirmed" things as discardable, so we cannot do that here. + // Because we're broadcasting a commitment transaction, we should construct claim + // requests assuming it gets confirmed in the next block. Sadly, we have code which + // considers "not yet confirmed" things as discardable, so we cannot do that here. let (mut new_outpoints, _) = self.get_broadcasted_holder_claims( funding, holder_commitment_tx, self.best_block.height, ); @@ -4759,11 +4759,11 @@ impl ChannelMonitorImpl { /// height > height + CLTV_SHARED_CLAIM_BUFFER. In any case, will install monitoring for /// HTLC-Success/HTLC-Timeout transactions. /// - /// Returns packages to claim the revoked output(s) and general information about the output that - /// is to the counterparty in the commitment transaction. + /// Returns claim requests for the revoked output(s) and general information about the output + /// that is to the counterparty in the commitment transaction. 
#[rustfmt::skip] fn check_spend_counterparty_transaction(&mut self, commitment_txid: Txid, commitment_tx: &Transaction, height: u32, block_hash: &BlockHash, logger: &L) - -> (Vec, CommitmentTxCounterpartyOutputInfo) + -> (Vec, CommitmentTxCounterpartyOutputInfo) { // Most secp and related errors trying to create keys means we have no hope of constructing // a spend transaction...so we return no transactions to broadcast @@ -4803,7 +4803,7 @@ impl ChannelMonitorImpl { per_commitment_point, per_commitment_key, outp.value, funding_spent.channel_parameters.clone(), height, ); - let justice_package = PackageTemplate::build_package( + let justice_package = ClaimRequest::new( commitment_txid, idx as u32, PackageSolvingData::RevokedOutput(revk_outp), height + self.counterparty_commitment_params.on_counterparty_tx_csv as u32, @@ -4832,7 +4832,7 @@ impl ChannelMonitorImpl { } else { height }; - let justice_package = PackageTemplate::build_package( + let justice_package = ClaimRequest::new( commitment_txid, transaction_output_index, PackageSolvingData::RevokedHTLCOutput(revk_htlc_outp), @@ -4921,7 +4921,7 @@ impl ChannelMonitorImpl { commitment_txid: Txid, per_commitment_option: Option<&Vec<(HTLCOutputInCommitment, Option>)>>, confirmation_height: Option, - ) -> Vec { + ) -> Vec { let per_commitment_claimable_data = match per_commitment_option { Some(outputs) => outputs, None => return Vec::new(), @@ -4946,7 +4946,7 @@ impl ChannelMonitorImpl { confirmation_height, ), ); - Some(PackageTemplate::build_package( + Some(ClaimRequest::new( commitment_txid, transaction_output_index, htlc_data, @@ -4962,13 +4962,13 @@ impl ChannelMonitorImpl { .collect() } - /// Returns the HTLC claim package templates and the counterparty output info + /// Returns the HTLC claim requests and the counterparty output info. 
fn get_counterparty_output_claim_info( &self, funding_spent: &FundingScope, commitment_number: u64, commitment_txid: Txid, tx: &Transaction, per_commitment_claimable_data: &[(HTLCOutputInCommitment, Option>)], confirmation_height: Option, - ) -> (Vec, CommitmentTxCounterpartyOutputInfo) { + ) -> (Vec, CommitmentTxCounterpartyOutputInfo) { let mut claimable_outpoints = Vec::new(); let mut to_counterparty_output_info: CommitmentTxCounterpartyOutputInfo = None; @@ -5039,7 +5039,7 @@ impl ChannelMonitorImpl { ), ) }; - let counterparty_package = PackageTemplate::build_package( + let counterparty_package = ClaimRequest::new( commitment_txid, transaction_output_index, counterparty_htlc_outp, @@ -5057,7 +5057,7 @@ impl ChannelMonitorImpl { #[rustfmt::skip] fn check_spend_counterparty_htlc( &mut self, tx: &Transaction, commitment_number: u64, commitment_txid: &Txid, height: u32, logger: &L - ) -> (Vec, Option) { + ) -> (Vec, Option) { let secret = if let Some(secret) = self.get_secret(commitment_number) { secret } else { return (Vec::new(), None); }; let per_commitment_key = match SecretKey::from_slice(&secret) { Ok(key) => key, @@ -5088,7 +5088,7 @@ impl ChannelMonitorImpl { per_commitment_point, per_commitment_key, tx.output[idx].value, self.funding.channel_parameters.clone(), height, ); - let justice_package = PackageTemplate::build_package( + let justice_package = ClaimRequest::new( htlc_txid, idx as u32, PackageSolvingData::RevokedOutput(revk_outp), height + self.counterparty_commitment_params.on_counterparty_tx_csv as u32, ); @@ -5140,13 +5140,14 @@ impl ChannelMonitorImpl { htlcs } - // Returns (1) `PackageTemplate`s that can be given to the OnchainTxHandler, so that the handler can - // broadcast transactions claiming holder HTLC commitment outputs and (2) a holder revokable - // script so we can detect whether a holder transaction has been seen on-chain. 
+ // Returns (1) `ClaimRequest`s that can be given to the OnchainTxHandler, so that the + // handler can broadcast transactions claiming holder HTLC commitment outputs and (2) a + // holder revokable script so we can detect whether a holder transaction has been seen + // on-chain. #[rustfmt::skip] fn get_broadcasted_holder_claims( &self, funding: &FundingScope, holder_tx: &HolderCommitmentTransaction, conf_height: u32, - ) -> (Vec, Option<(ScriptBuf, PublicKey, RevocationKey)>) { + ) -> (Vec, Option<(ScriptBuf, PublicKey, RevocationKey)>) { let tx = holder_tx.trust(); let keys = tx.keys(); let redeem_script = chan_utils::get_revokeable_redeemscript( @@ -5165,7 +5166,7 @@ impl ChannelMonitorImpl { }; let transaction_output_index = htlc_descriptor.htlc.transaction_output_index .expect("Expected transaction output index for non-dust HTLC"); - PackageTemplate::build_package( + ClaimRequest::new( tx.txid(), transaction_output_index, PackageSolvingData::HolderHTLCOutput(HolderHTLCOutput::build(htlc_descriptor, conf_height)), counterparty_spendable_height, @@ -5201,7 +5202,7 @@ impl ChannelMonitorImpl { fn check_spend_holder_transaction( &mut self, commitment_txid: Txid, commitment_tx: &Transaction, height: u32, block_hash: &BlockHash, logger: &L, - ) -> Option<(Vec, TransactionOutputs)> { + ) -> Option<(Vec, TransactionOutputs)> { let funding_spent = get_confirmed_funding_scope!(self); // HTLCs set may differ between last and previous holder commitment txn, in case of one them hitting chain, ensure we cancel all HTLCs backward @@ -5712,7 +5713,7 @@ impl ChannelMonitorImpl { conf_hash: BlockHash, txn_matched: Vec<&Transaction>, mut watch_outputs: Vec, - mut claimable_outpoints: Vec, + mut claimable_outpoints: Vec, broadcaster: &B, fee_estimator: &LowerBoundedFeeEstimator, logger: &WithContext, diff --git a/lightning/src/chain/onchaintx.rs b/lightning/src/chain/onchaintx.rs index 3eb6d64f3a2..823b81936ce 100644 --- a/lightning/src/chain/onchaintx.rs +++ 
b/lightning/src/chain/onchaintx.rs @@ -27,7 +27,7 @@ use crate::chain::chaininterface::{ BroadcasterInterface, FeeEstimator, LowerBoundedFeeEstimator, TransactionType, }; use crate::chain::channelmonitor::ANTI_REORG_DELAY; -use crate::chain::package::{PackageSolvingData, PackageTemplate}; +use crate::chain::package::{ClaimRequest, PackageSolvingData, PackageTemplate}; use crate::chain::transaction::MaybeSignedTransaction; use crate::chain::ClaimId; use crate::ln::chan_utils::{ @@ -791,7 +791,7 @@ impl OnchainTxHandler { /// `cur_height`, however it must never be higher than `cur_height`. #[rustfmt::skip] pub(super) fn update_claims_view_from_requests( - &mut self, mut requests: Vec, conf_height: u32, cur_height: u32, + &mut self, mut requests: Vec, conf_height: u32, cur_height: u32, broadcaster: &B, conf_target: ConfirmationTarget, destination_script: &Script, fee_estimator: &LowerBoundedFeeEstimator, logger: &L, ) { @@ -801,33 +801,26 @@ impl OnchainTxHandler { // First drop any duplicate claims. 
requests.retain(|req| { - debug_assert_eq!( - req.outpoints().len(), - 1, - "Claims passed to `update_claims_view_from_requests` should not be aggregated" - ); - let mut all_outpoints_claiming = true; - for outpoint in req.outpoints() { - if self.claimable_outpoints.get(outpoint).is_none() { - all_outpoints_claiming = false; - } - } - if all_outpoints_claiming { + let outpoint = req.outpoint(); + if self.claimable_outpoints.get(outpoint).is_some() { log_info!(logger, "Ignoring second claim for outpoint {}:{}, already registered its claiming request", - req.outpoints()[0].txid, req.outpoints()[0].vout); + outpoint.txid, outpoint.vout); false } else { let timelocked_equivalent_package = self.locktimed_packages.iter().map(|v| v.1.iter()).flatten() - .find(|locked_package| locked_package.outpoints() == req.outpoints()); + .find(|locked_package| locked_package.outpoints().len() == 1 && locked_package.contains_outpoint(outpoint)); if let Some(package) = timelocked_equivalent_package { log_info!(logger, "Ignoring second claim for outpoint {}:{}, we already have one which we're waiting on a timelock at {} for.", - req.outpoints()[0].txid, req.outpoints()[0].vout, package.package_locktime(cur_height)); + outpoint.txid, outpoint.vout, package.package_locktime(cur_height)); false } else { true } } }); + let mut requests = requests.into_iter() + .map(ClaimRequest::into_package_template) + .collect::>(); // Then try to maximally aggregate `requests`. 
for i in (1..requests.len()).rev() { @@ -1290,7 +1283,7 @@ mod tests { use types::features::ChannelTypeFeatures; use crate::chain::chaininterface::{ConfirmationTarget, LowerBoundedFeeEstimator}; - use crate::chain::package::{HolderHTLCOutput, PackageSolvingData, PackageTemplate}; + use crate::chain::package::{ClaimRequest, HolderHTLCOutput, PackageSolvingData}; use crate::chain::transaction::OutPoint; use crate::ln::chan_utils::{ ChannelPublicKeys, ChannelTransactionParameters, CounterpartyChannelTransactionParameters, @@ -1412,7 +1405,7 @@ mod tests { let holder_commit_txid = holder_commit.trust().txid(); let mut requests = Vec::new(); for (htlc, counterparty_sig) in holder_commit.nondust_htlcs().iter().zip(holder_commit.counterparty_htlc_sigs.iter()) { - requests.push(PackageTemplate::build_package( + requests.push(ClaimRequest::new( holder_commit_txid, htlc.transaction_output_index.unwrap(), PackageSolvingData::HolderHTLCOutput(HolderHTLCOutput::build(HTLCDescriptor { diff --git a/lightning/src/chain/package.rs b/lightning/src/chain/package.rs index 0ef8855242b..06be5750367 100644 --- a/lightning/src/chain/package.rs +++ b/lightning/src/chain/package.rs @@ -1097,6 +1097,19 @@ enum PackageMalleability { Untractable, } +/// A single on-chain output claim generated by [`ChannelMonitor`]. +/// +/// These requests are converted to [`PackageTemplate`]s once [`OnchainTxHandler`] has deduplicated +/// them and is ready to aggregate compatible claims. +/// +/// [`ChannelMonitor`]: crate::chain::channelmonitor::ChannelMonitor +/// [`OnchainTxHandler`]: crate::chain::onchaintx::OnchainTxHandler +#[derive(Clone, Debug, PartialEq, Eq)] +pub(crate) struct ClaimRequest { + input: (BitcoinOutPoint, PackageSolvingData), + counterparty_spendable_height: u32, +} + /// A structure to describe a package content that is generated by ChannelMonitor and /// used by OnchainTxHandler to generate and broadcast transactions settling onchain claims. 
/// @@ -1179,6 +1192,32 @@ impl PartialEq for PackageTemplate { } } +impl ClaimRequest { + pub(crate) fn new( + txid: Txid, vout: u32, input_solving_data: PackageSolvingData, + counterparty_spendable_height: u32, + ) -> Self { + Self { + input: (BitcoinOutPoint { txid, vout }, input_solving_data), + counterparty_spendable_height, + } + } + + pub(crate) fn outpoint(&self) -> &BitcoinOutPoint { + &self.input.0 + } + + pub(crate) fn into_package_template(self) -> PackageTemplate { + let (outpoint, input_solving_data) = self.input; + PackageTemplate::build_package( + outpoint.txid, + outpoint.vout, + input_solving_data, + self.counterparty_spendable_height, + ) + } +} + impl PackageTemplate { #[rustfmt::skip] pub(crate) fn can_merge_with(&self, other: &PackageTemplate, cur_height: u32) -> bool { @@ -1265,6 +1304,9 @@ impl PackageTemplate { pub(crate) fn outpoints(&self) -> Vec<&BitcoinOutPoint> { self.inputs.iter().map(|(o, _)| o).collect() } + pub(crate) fn contains_outpoint(&self, outpoint: &BitcoinOutPoint) -> bool { + self.inputs.iter().any(|(input, _)| input == outpoint) + } pub(crate) fn outpoints_and_creation_heights( &self, ) -> impl Iterator)> { From bcfd69d7734c944922b5839d58cd975289337d41 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Thu, 30 Apr 2026 16:02:00 +0200 Subject: [PATCH 2/6] lightning: refactor onchain tx handler tests Move repeated OnchainTxHandler setup into shared test helpers so the claim-replay coverage can focus on the behavior under test. 
--- lightning/src/chain/onchaintx.rs | 111 +++++++++++++++++-------------- 1 file changed, 62 insertions(+), 49 deletions(-) diff --git a/lightning/src/chain/onchaintx.rs b/lightning/src/chain/onchaintx.rs index 823b81936ce..e559f093922 100644 --- a/lightning/src/chain/onchaintx.rs +++ b/lightning/src/chain/onchaintx.rs @@ -1298,12 +1298,9 @@ mod tests { use super::OnchainTxHandler; - // Test that all claims with locktime equal to or less than the current height are broadcast - // immediately while claims with locktime greater than the current height are only broadcast - // once the locktime is reached. - #[test] - #[rustfmt::skip] - fn test_broadcast_height() { + fn new_test_tx_handler( + channel_type_features: ChannelTypeFeatures, nondust_htlcs: Vec, + ) -> OnchainTxHandler { let secp_ctx = Secp256k1::new(); let signer = InMemorySigner::new( SecretKey::from_slice(&[41; 32]).unwrap(), @@ -1340,9 +1337,6 @@ mod tests { )), }; let funding_outpoint = OutPoint { txid: Txid::all_zeros(), index: u16::MAX }; - - // Use non-anchor channels so that HTLC-Timeouts are broadcast immediately instead of sent - // to the user for external funding. let chan_params = ChannelTransactionParameters { holder_pubkeys: signer.pubkeys(&secp_ctx), holder_selected_contest_delay: 66, @@ -1353,66 +1347,45 @@ mod tests { }), funding_outpoint: Some(funding_outpoint), splice_parent_funding_txid: None, - channel_type_features: ChannelTypeFeatures::only_static_remote_key(), + channel_type_features, channel_value_satoshis: 0, }; - - // Create an OnchainTxHandler for a commitment containing HTLCs with CLTV expiries of 0, 1, - // and 2 blocks. 
- let mut nondust_htlcs = Vec::new(); - for i in 0..3 { - let preimage = PaymentPreimage([i; 32]); - let hash = PaymentHash(Sha256::hash(&preimage.0[..]).to_byte_array()); - nondust_htlcs.push( - HTLCOutputInCommitment { - offered: true, - amount_msat: 10000, - cltv_expiry: i as u32, - payment_hash: hash, - transaction_output_index: Some(i as u32), - } - ); - } - let holder_commit = HolderCommitmentTransaction::dummy(1000000, funding_outpoint, nondust_htlcs); - let destination_script = ScriptBuf::new(); + let holder_commit = + HolderCommitmentTransaction::dummy(1000000, funding_outpoint, nondust_htlcs); let counterparty_node_id = PublicKey::from_slice(&[2; 33]).unwrap(); - let mut tx_handler = OnchainTxHandler::new( + OnchainTxHandler::new( ChannelId::from_bytes([0; 32]), counterparty_node_id, 1000000, [0; 32], - destination_script.clone(), + ScriptBuf::new(), signer, chan_params, holder_commit, secp_ctx, - ); - - // Create a broadcaster with current block height 1. - let broadcaster = TestBroadcaster::new(Network::Testnet); - { - let mut blocks = broadcaster.blocks.lock().unwrap(); - let genesis_hash = blocks[0].0.block_hash(); - blocks.push((create_dummy_block(genesis_hash, 0, Vec::new()), 1)); - } - - let fee_estimator = TestFeeEstimator::new(253); - let fee_estimator = LowerBoundedFeeEstimator::new(&fee_estimator); - let logger = TestLogger::new(); + ) + } - // Request claiming of each HTLC on the holder's commitment, with current block height 1. 
+ fn build_offered_holder_htlc_requests( + tx_handler: &OnchainTxHandler, + ) -> Vec { let holder_commit = tx_handler.current_holder_commitment_tx(); let holder_commit_txid = holder_commit.trust().txid(); let mut requests = Vec::new(); - for (htlc, counterparty_sig) in holder_commit.nondust_htlcs().iter().zip(holder_commit.counterparty_htlc_sigs.iter()) { + for (htlc, counterparty_sig) in + holder_commit.nondust_htlcs().iter().zip(holder_commit.counterparty_htlc_sigs.iter()) + { requests.push(ClaimRequest::new( holder_commit_txid, htlc.transaction_output_index.unwrap(), - PackageSolvingData::HolderHTLCOutput(HolderHTLCOutput::build(HTLCDescriptor { + PackageSolvingData::HolderHTLCOutput(HolderHTLCOutput::build( + HTLCDescriptor { channel_derivation_parameters: ChannelDerivationParameters { value_satoshis: tx_handler.channel_value_satoshis, keys_id: tx_handler.channel_keys_id, - transaction_parameters: tx_handler.channel_transaction_parameters.clone(), + transaction_parameters: tx_handler + .channel_transaction_parameters + .clone(), }, commitment_txid: holder_commit_txid, per_commitment_number: holder_commit.commitment_number(), @@ -1422,11 +1395,51 @@ mod tests { preimage: None, counterparty_sig: *counterparty_sig, }, - 0 + 0, )), 0, )); } + requests + } + + // Test that all claims with locktime equal to or less than the current height are broadcast + // immediately while claims with locktime greater than the current height are only broadcast + // once the locktime is reached. + #[test] + fn test_broadcast_height() { + // Create an OnchainTxHandler for a commitment containing HTLCs with CLTV expiries of 0, 1, + // and 2 blocks. 
+ let mut nondust_htlcs = Vec::new(); + for i in 0..3 { + let preimage = PaymentPreimage([i; 32]); + let hash = PaymentHash(Sha256::hash(&preimage.0[..]).to_byte_array()); + nondust_htlcs.push(HTLCOutputInCommitment { + offered: true, + amount_msat: 10000, + cltv_expiry: i as u32, + payment_hash: hash, + transaction_output_index: Some(i as u32), + }); + } + let destination_script = ScriptBuf::new(); + let mut tx_handler = + new_test_tx_handler(ChannelTypeFeatures::only_static_remote_key(), nondust_htlcs); + + // Create a broadcaster with current block height 1. + let broadcaster = TestBroadcaster::new(Network::Testnet); + { + let mut blocks = broadcaster.blocks.lock().unwrap(); + let genesis_hash = blocks[0].0.block_hash(); + blocks.push((create_dummy_block(genesis_hash, 0, Vec::new()), 1)); + } + + let fee_estimator = TestFeeEstimator::new(253); + let fee_estimator = LowerBoundedFeeEstimator::new(&fee_estimator); + let logger = TestLogger::new(); + + // Request claiming of each HTLC on the holder's commitment, with current block height 1. + let requests = build_offered_holder_htlc_requests(&tx_handler); tx_handler.update_claims_view_from_requests( requests, 1, From 97a36a87228acdc50d71dfdb0b637b3e882ea6c2 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Thu, 30 Apr 2026 14:21:04 +0200 Subject: [PATCH 3/6] lightning: dedupe delayed claims by outpoint coverage A replayed holder HTLC claim may arrive as a single-outpoint request after earlier requests were merged into a delayed package. Check whether an existing delayed package already covers the new request instead of requiring exact outpoint-set equality. Add focused OnchainTxHandler coverage and a ChannelMonitor regression through claim_funds for both current anchor variants. 
--- lightning/src/chain/onchaintx.rs | 95 ++++++++++++++++++++++++- lightning/src/ln/monitor_tests.rs | 113 ++++++++++++++++++++++++++++++ 2 files changed, 205 insertions(+), 3 deletions(-) diff --git a/lightning/src/chain/onchaintx.rs b/lightning/src/chain/onchaintx.rs index e559f093922..2cdad08de1e 100644 --- a/lightning/src/chain/onchaintx.rs +++ b/lightning/src/chain/onchaintx.rs @@ -807,9 +807,10 @@ impl OnchainTxHandler { outpoint.txid, outpoint.vout); false } else { - let timelocked_equivalent_package = self.locktimed_packages.iter().map(|v| v.1.iter()).flatten() - .find(|locked_package| locked_package.outpoints().len() == 1 && locked_package.contains_outpoint(outpoint)); - if let Some(package) = timelocked_equivalent_package { + let timelocked_covering_package = self.locktimed_packages.values() + .flat_map(|packages| packages.iter()) + .find(|locked_package| locked_package.contains_outpoint(outpoint)); + if let Some(package) = timelocked_covering_package { log_info!(logger, "Ignoring second claim for outpoint {}:{}, we already have one which we're waiting on a timelock at {} for.", outpoint.txid, outpoint.vout, package.package_locktime(cur_height)); false @@ -1480,4 +1481,92 @@ mod tests { assert_eq!(txs_broadcasted.len(), 1); assert_eq!(txs_broadcasted[0].lock_time.to_consensus_u32(), 2); } + + #[test] + fn test_duplicate_pending_claim_request_after_force_close_replay() { + let claim_height = 21; + let locktime = 42; + let mut nondust_htlcs = Vec::new(); + for i in 0..2 { + let preimage = PaymentPreimage([i + 1; 32]); + let hash = PaymentHash(Sha256::hash(&preimage.0[..]).to_byte_array()); + nondust_htlcs.push(HTLCOutputInCommitment { + offered: true, + amount_msat: 10000, + cltv_expiry: locktime, + payment_hash: hash, + transaction_output_index: Some(i as u32), + }); + } + + let mut tx_handler = new_test_tx_handler( + ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies(), + nondust_htlcs, + ); + let requests = 
build_offered_holder_htlc_requests(&tx_handler); + let destination_script = ScriptBuf::new(); + let broadcaster = TestBroadcaster::new(Network::Testnet); + let fee_estimator = TestFeeEstimator::new(253); + let fee_estimator = LowerBoundedFeeEstimator::new(&fee_estimator); + let logger = TestLogger::new(); + + // Simulate the force-close path registering the two holder HTLC claims as + // a single delayed package. + tx_handler.update_claims_view_from_requests( + requests.clone(), + claim_height, + claim_height, + &&broadcaster, + ConfirmationTarget::UrgentOnChainSweep, + &destination_script, + &fee_estimator, + &logger, + ); + assert_eq!( + tx_handler.locktimed_packages.get(&locktime).map(|packages| packages.len()), + Some(1), + ); + + // Replaying the same per-HTLC claim requests must match by outpoint + // coverage, otherwise each single-outpoint request would be added again. + tx_handler.update_claims_view_from_requests( + requests, + claim_height, + claim_height, + &&broadcaster, + ConfirmationTarget::UrgentOnChainSweep, + &destination_script, + &fee_estimator, + &logger, + ); + assert_eq!( + tx_handler.locktimed_packages.get(&locktime).map(|packages| packages.len()), + Some(1), + ); + + // At locktime, the delayed package should still yield one bump event + // covering both HTLCs. + tx_handler.update_claims_view_from_requests( + Vec::new(), + locktime, + locktime, + &&broadcaster, + ConfirmationTarget::UrgentOnChainSweep, + &destination_script, + &fee_estimator, + &logger, + ); + + let pending_events = tx_handler.get_and_clear_pending_claim_events(); + assert_eq!(pending_events.len(), 1); + assert_eq!(tx_handler.pending_claim_requests.len(), 1); + assert_eq!(tx_handler.claimable_outpoints.len(), 2); + match &pending_events[0].1 { + super::ClaimEvent::BumpHTLC { htlcs, tx_lock_time, .. 
} => { + assert_eq!(htlcs.len(), 2); + assert_eq!(tx_lock_time.to_consensus_u32(), locktime); + }, + _ => panic!("expected a single HTLC bump event"), + } + } } diff --git a/lightning/src/ln/monitor_tests.rs b/lightning/src/ln/monitor_tests.rs index f52f093917b..43cdfa7d314 100644 --- a/lightning/src/ln/monitor_tests.rs +++ b/lightning/src/ln/monitor_tests.rs @@ -2388,6 +2388,119 @@ fn test_restored_packages_retry() { do_test_restored_packages_retry(true); } +fn do_test_duplicate_delayed_holder_htlc_claims_after_claim_funds_replay(p2a_anchor: bool) { + let chanmon_cfgs = create_chanmon_cfgs(2); + let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let mut anchors_config = test_default_channel_config(); + anchors_config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true; + anchors_config.channel_handshake_config.negotiate_anchor_zero_fee_commitments = p2a_anchor; + let node_chanmgrs = + create_node_chanmgrs(2, &node_cfgs, &[Some(anchors_config.clone()), Some(anchors_config)]); + let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + + let coinbase_tx = provide_anchor_reserves(&nodes); + let (_, _, chan_id, funding_tx) = + create_announced_chan_between_nodes_with_value(&nodes, 0, 1, 1_000_000, 50_000_000); + + // Seed two unresolved outbound HTLCs that will be aggregated into one + // delayed holder-commitment package after force close. + route_payment(&nodes[0], &[&nodes[1]], 10_000_000); + route_payment(&nodes[0], &[&nodes[1]], 11_000_000); + + // Add a third incoming HTLC which will later be claimed by preimage after + // the commitment transaction confirms, reproducing the replay path. 
+ let (claim_route, claim_hash, claim_preimage, claim_secret) = + get_route_and_payment_hash!(nodes[1], nodes[0], 12_000_000); + nodes[1] + .node + .send_payment_with_route( + claim_route, + claim_hash, + RecipientOnionFields::secret_only(claim_secret, 12_000_000), + PaymentId(claim_hash.0), + ) + .unwrap(); + check_added_monitors(&nodes[1], 1); + let updates = get_htlc_update_msgs(&nodes[1], &nodes[0].node.get_our_node_id()); + nodes[0] + .node + .handle_update_add_htlc(nodes[1].node.get_our_node_id(), &updates.update_add_htlcs[0]); + do_commitment_signed_dance(&nodes[0], &nodes[1], &updates.commitment_signed, false, false); + expect_and_process_pending_htlcs(&nodes[0], false); + expect_payment_claimable!(nodes[0], claim_hash, claim_secret, 12_000_000); + + // Force-close node 0 so its holder commitment hits chain and its HTLC + // claims are fed into OnchainTxHandler as delayed requests. + let message = "Channel force-closed".to_owned(); + nodes[0] + .node + .force_close_broadcasting_latest_txn( + &chan_id, + &nodes[1].node.get_our_node_id(), + message.clone(), + ) + .unwrap(); + check_added_monitors(&nodes[0], 1); + check_closed_broadcast(&nodes[0], 1, true); + let reason = ClosureReason::HolderForceClosed { broadcasted_latest_txn: Some(true), message }; + check_closed_event(&nodes[0], 1, reason, &[nodes[1].node.get_our_node_id()], 1_000_000); + handle_bump_close_event(&nodes[0]); + + let (commitment_tx, anchor_tx) = { + let mut txn = nodes[0].tx_broadcaster.unique_txn_broadcast(); + assert_eq!(txn.len(), if p2a_anchor { 2 } else { 1 }); + let anchor_tx = p2a_anchor.then(|| txn.pop().unwrap()); + let commitment_tx = txn.pop().unwrap(); + check_spends!(commitment_tx, funding_tx); + if p2a_anchor { + check_spends!(anchor_tx.as_ref().unwrap(), commitment_tx, coinbase_tx); + } + (commitment_tx, anchor_tx) + }; + + let _ = mine_transaction(&nodes[0], &commitment_tx); + if p2a_anchor { + let _ = mine_transaction(&nodes[0], anchor_tx.as_ref().unwrap()); + } + + // 
Claim the incoming HTLC after the commitment is confirmed. This + // regenerates a single-outpoint claim request alongside the existing + // delayed package covering the two earlier HTLCs. + nodes[0].node.claim_funds(claim_preimage); + check_added_monitors(&nodes[0], 1); + expect_payment_claimed!(nodes[0], claim_hash, 12_000_000); + + // Once all holder HTLCs are mature, we should see the original two-HTLC + // delayed package plus the replayed single-HTLC claim, not duplicates of + // the delayed package's outpoints. + connect_blocks(&nodes[0], TEST_FINAL_CLTV + 1); + + let mut htlc_event_sizes = nodes[0] + .chain_monitor + .chain_monitor + .get_and_clear_pending_events() + .into_iter() + .filter_map(|event| { + if let Event::BumpTransaction(BumpTransactionEvent::HTLCResolution { + htlc_descriptors, .. + }) = event + { + Some(htlc_descriptors.len()) + } else { + None + } + }) + .collect::>(); + htlc_event_sizes.sort_unstable(); + assert_eq!(htlc_event_sizes, vec![1, 2]); +} + +#[test] +fn test_duplicate_delayed_holder_htlc_claims_after_claim_funds_replay() { + do_test_duplicate_delayed_holder_htlc_claims_after_claim_funds_replay(false); + do_test_duplicate_delayed_holder_htlc_claims_after_claim_funds_replay(true); +} + fn do_test_monitor_rebroadcast_pending_claims(keyed_anchors: bool, p2a_anchor: bool) { // Test that we will retry broadcasting pending claims for a force-closed channel on every // `ChainMonitor::rebroadcast_pending_claims` call. From 74de5b4ccbf78fb4dda04b6cbd402177496bd8a0 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Thu, 30 Apr 2026 14:23:24 +0200 Subject: [PATCH 4/6] lightning: ignore claims for pending spent outpoints When a transaction spends one outpoint from a delayed package, the split outpoint is tracked as a ContentiousOutpoint while it awaits anti-reorg confirmation. Reject replayed claim requests for those pending-spent outpoints so they are not added back before the spend either matures or reorgs out. 
Add an OnchainTxHandler regression that replays a holder claim during that pending-spent window and verifies reorg resurrection still works. --- lightning/src/chain/onchaintx.rs | 139 ++++++++++++++++++++++++++++++- 1 file changed, 136 insertions(+), 3 deletions(-) diff --git a/lightning/src/chain/onchaintx.rs b/lightning/src/chain/onchaintx.rs index 2cdad08de1e..ac007d988a0 100644 --- a/lightning/src/chain/onchaintx.rs +++ b/lightning/src/chain/onchaintx.rs @@ -576,6 +576,16 @@ impl OnchainTxHandler { self.pending_claim_requests.len() != 0 } + fn is_outpoint_spend_waiting_threshold_conf(&self, outpoint: &BitcoinOutPoint) -> bool { + self.onchain_events_awaiting_threshold_conf.iter().any(|entry| { + if let OnchainEvent::ContentiousOutpoint { ref package } = entry.event { + package.contains_outpoint(outpoint) + } else { + false + } + }) + } + /// Lightning security model (i.e being able to redeem/timeout HTLC or penalize counterparty /// onchain) lays on the assumption of claim transactions getting confirmed before timelock /// expiration (CSV or CLTV following cases). In case of high-fee spikes, claim tx may get stuck @@ -802,7 +812,15 @@ impl OnchainTxHandler { // First drop any duplicate claims. requests.retain(|req| { let outpoint = req.outpoint(); - if self.claimable_outpoints.get(outpoint).is_some() { + if self.is_outpoint_spend_waiting_threshold_conf(outpoint) { + // This is a package-layer guard. ChannelMonitor filters regenerated + // HTLC claims using HTLC resolution state, while this keeps outpoints + // split from an existing package from being re-added during the reorg + // window. 
+ log_info!(logger, "Ignoring claim for outpoint {}:{}, it is already spent by a transaction awaiting anti-reorg confirmation", + outpoint.txid, outpoint.vout); + false + } else if self.claimable_outpoints.get(outpoint).is_some() { log_info!(logger, "Ignoring second claim for outpoint {}:{}, already registered its claiming request", outpoint.txid, outpoint.vout); false @@ -1276,11 +1294,14 @@ impl OnchainTxHandler { #[cfg(test)] mod tests { - use bitcoin::hash_types::Txid; + use bitcoin::hash_types::{BlockHash, Txid}; use bitcoin::hashes::sha256::Hash as Sha256; use bitcoin::hashes::Hash; + use bitcoin::locktime::absolute::LockTime; + use bitcoin::transaction::{OutPoint as BitcoinOutPoint, Version}; use bitcoin::Network; - use bitcoin::{key::Secp256k1, secp256k1::PublicKey, secp256k1::SecretKey, ScriptBuf}; + use bitcoin::{key::Secp256k1, secp256k1::PublicKey, secp256k1::SecretKey}; + use bitcoin::{Amount, ScriptBuf, Transaction, TxIn, TxOut}; use types::features::ChannelTypeFeatures; use crate::chain::chaininterface::{ConfirmationTarget, LowerBoundedFeeEstimator}; @@ -1404,6 +1425,18 @@ mod tests { requests } + fn locked_outpoints( + tx_handler: &OnchainTxHandler, locktime: u32, + ) -> Vec { + tx_handler + .locktimed_packages + .get(&locktime) + .into_iter() + .flat_map(|packages| packages.iter()) + .flat_map(|package| package.outpoints().into_iter().map(|outpoint| *outpoint)) + .collect() + } + // Test that all claims with locktime equal to or less than the current height are broadcast // immediately while claims with locktime greater than the current height are only broadcast // once the locktime is reached. 
@@ -1569,4 +1602,104 @@ mod tests { _ => panic!("expected a single HTLC bump event"), } } + + #[test] + fn test_replayed_claim_ignored_for_pending_spent_outpoint() { + let claim_height = 21; + let spend_height = 22; + let locktime = 42; + let mut nondust_htlcs = Vec::new(); + for i in 0..2 { + let preimage = PaymentPreimage([i + 1; 32]); + let hash = PaymentHash(Sha256::hash(&preimage.0[..]).to_byte_array()); + nondust_htlcs.push(HTLCOutputInCommitment { + offered: true, + amount_msat: 10000, + cltv_expiry: locktime, + payment_hash: hash, + transaction_output_index: Some(i as u32), + }); + } + + let mut tx_handler = new_test_tx_handler( + ChannelTypeFeatures::anchors_zero_htlc_fee_and_dependencies(), + nondust_htlcs, + ); + let requests = build_offered_holder_htlc_requests(&tx_handler); + let spent_outpoint = *requests[0].outpoint(); + let still_delayed_outpoint = *requests[1].outpoint(); + let destination_script = ScriptBuf::new(); + let broadcaster = TestBroadcaster::new(Network::Testnet); + let fee_estimator = TestFeeEstimator::new(253); + let fee_estimator = LowerBoundedFeeEstimator::new(&fee_estimator); + let logger = TestLogger::new(); + + // Register both holder HTLC claims as one delayed package before any + // individual outpoint spends are observed. + tx_handler.update_claims_view_from_requests( + requests.clone(), + claim_height, + claim_height, + &&broadcaster, + ConfirmationTarget::UrgentOnChainSweep, + &destination_script, + &fee_estimator, + &logger, + ); + assert_eq!(locked_outpoints(&tx_handler, locktime).len(), 2); + + // Spend one outpoint before the package matures. The handler should split + // it into a ContentiousOutpoint until the anti-reorg threshold passes. 
+ let spend_tx = Transaction { + version: Version::TWO, + lock_time: LockTime::ZERO, + input: vec![TxIn { previous_output: spent_outpoint, ..Default::default() }], + output: vec![TxOut { value: Amount::from_sat(1000), script_pubkey: ScriptBuf::new() }], + }; + tx_handler.update_claims_view_from_matched_txn( + &[&spend_tx], + spend_height, + BlockHash::all_zeros(), + spend_height, + &&broadcaster, + ConfirmationTarget::UrgentOnChainSweep, + &destination_script, + &fee_estimator, + &logger, + ); + let locked = locked_outpoints(&tx_handler, locktime); + assert_eq!(locked, vec![still_delayed_outpoint]); + + // Replaying both original claim requests during that window must not + // re-add the already-spent outpoint to the delayed package. + tx_handler.update_claims_view_from_requests( + requests, + spend_height, + spend_height, + &&broadcaster, + ConfirmationTarget::UrgentOnChainSweep, + &destination_script, + &fee_estimator, + &logger, + ); + let locked = locked_outpoints(&tx_handler, locktime); + assert_eq!(locked, vec![still_delayed_outpoint]); + assert!(tx_handler.pending_claim_requests.is_empty()); + assert!(tx_handler.claimable_outpoints.is_empty()); + + // If the spend reorgs out, the contentious outpoint is resurrected into + // the delayed package. + tx_handler.blocks_disconnected( + spend_height - 1, + &&broadcaster, + ConfirmationTarget::UrgentOnChainSweep, + &destination_script, + &fee_estimator, + &logger, + ); + let locked = locked_outpoints(&tx_handler, locktime); + assert_eq!(locked.len(), 2); + assert!(locked.contains(&spent_outpoint)); + assert!(locked.contains(&still_delayed_outpoint)); + } } From 4e3025e14b15d033bf3e06ec55cb8b19dbe66615 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Thu, 30 Apr 2026 14:29:33 +0200 Subject: [PATCH 5/6] lightning: skip resolved HTLC claim replays Filter regenerated HTLC claim requests only after ChannelMonitor has persisted final HTLC resolution for the commitment output. 
This keeps replayed preimage updates from recreating claims once the monitor has durable resolution state, while preserving live conflicting claims before final resolution so they can be retried if a counterparty spend reorgs out. --- lightning/src/chain/channelmonitor.rs | 26 +++++++- lightning/src/ln/monitor_tests.rs | 93 ++++++++++++++++++++++++++- 2 files changed, 116 insertions(+), 3 deletions(-) diff --git a/lightning/src/chain/channelmonitor.rs b/lightning/src/chain/channelmonitor.rs index 2442f9c6af1..a2f474b4e65 100644 --- a/lightning/src/chain/channelmonitor.rs +++ b/lightning/src/chain/channelmonitor.rs @@ -4936,7 +4936,10 @@ impl ChannelMonitorImpl { .iter() .filter_map(|(htlc, _)| { if let Some(transaction_output_index) = htlc.transaction_output_index { - if htlc.offered && htlc.payment_hash == matching_payment_hash { + if htlc.offered + && htlc.payment_hash == matching_payment_hash + && !self.is_htlc_output_spent_on_chain(htlc) + { let htlc_data = PackageSolvingData::CounterpartyOfferedHTLCOutput( CounterpartyOfferedHTLCOutput::build( per_commitment_point, @@ -4962,6 +4965,21 @@ impl ChannelMonitorImpl { .collect() } + fn is_htlc_output_spent_on_chain(&self, htlc: &HTLCOutputInCommitment) -> bool { + if let Some(transaction_output_index) = htlc.transaction_output_index { + // Only suppress claims once the monitor has persisted final HTLC + // resolution. While a conflicting spend is still awaiting anti-reorg + // confirmation, a replayed preimage may create a live conflicting + // claim; keeping that claim in OnchainTxHandler preserves retry state + // if the spend reorgs out. + self.htlcs_resolved_on_chain.iter().any(|resolved_htlc| { + resolved_htlc.commitment_tx_output_idx == Some(transaction_output_index) + }) + } else { + false + } + } + /// Returns the HTLC claim requests and the counterparty output info. 
fn get_counterparty_output_claim_info( &self, funding_spent: &FundingScope, commitment_number: u64, commitment_txid: Txid, @@ -5009,6 +5027,9 @@ impl ChannelMonitorImpl { // per_commitment_data is corrupt or our commitment signing key leaked! return (claimable_outpoints, to_counterparty_output_info); } + if self.is_htlc_output_spent_on_chain(htlc) { + continue; + } let preimage = if htlc.offered { if let Some((p, _)) = self.payment_preimages.get(&htlc.payment_hash) { Some(*p) @@ -5110,6 +5131,9 @@ impl ChannelMonitorImpl { let mut htlcs = Vec::with_capacity(holder_tx.nondust_htlcs().len()); debug_assert_eq!(holder_tx.nondust_htlcs().len(), holder_tx.counterparty_htlc_sigs.len()); for (htlc, counterparty_sig) in holder_tx.nondust_htlcs().iter().zip(holder_tx.counterparty_htlc_sigs.iter()) { + if self.is_htlc_output_spent_on_chain(htlc) { + continue; + } assert!(htlc.transaction_output_index.is_some(), "Expected transaction output index for non-dust HTLC"); let preimage = if htlc.offered { diff --git a/lightning/src/ln/monitor_tests.rs b/lightning/src/ln/monitor_tests.rs index 43cdfa7d314..8979e7f1be9 100644 --- a/lightning/src/ln/monitor_tests.rs +++ b/lightning/src/ln/monitor_tests.rs @@ -2391,12 +2391,15 @@ fn test_restored_packages_retry() { fn do_test_duplicate_delayed_holder_htlc_claims_after_claim_funds_replay(p2a_anchor: bool) { let chanmon_cfgs = create_chanmon_cfgs(2); let node_cfgs = create_node_cfgs(2, &chanmon_cfgs); + let persister; + let new_chain_monitor; + let node_deserialized; let mut anchors_config = test_default_channel_config(); anchors_config.channel_handshake_config.negotiate_anchors_zero_fee_htlc_tx = true; anchors_config.channel_handshake_config.negotiate_anchor_zero_fee_commitments = p2a_anchor; let node_chanmgrs = create_node_chanmgrs(2, &node_cfgs, &[Some(anchors_config.clone()), Some(anchors_config)]); - let nodes = create_network(2, &node_cfgs, &node_chanmgrs); + let mut nodes = create_network(2, &node_cfgs, &node_chanmgrs); let 
coinbase_tx = provide_anchor_reserves(&nodes); let (_, _, chan_id, funding_tx) = @@ -2475,11 +2478,14 @@ fn do_test_duplicate_delayed_holder_htlc_claims_after_claim_funds_replay(p2a_anc // the delayed package's outpoints. connect_blocks(&nodes[0], TEST_FINAL_CLTV + 1); - let mut htlc_event_sizes = nodes[0] + let events = nodes[0] .chain_monitor .chain_monitor .get_and_clear_pending_events() .into_iter() + .collect::<Vec<_>>(); + let mut htlc_event_sizes = events + .iter() .filter_map(|event| { if let Event::BumpTransaction(BumpTransactionEvent::HTLCResolution { htlc_descriptors, .. @@ -2493,6 +2499,89 @@ fn do_test_duplicate_delayed_holder_htlc_claims_after_claim_funds_replay(p2a_anc .collect::<Vec<_>>(); htlc_event_sizes.sort_unstable(); assert_eq!(htlc_event_sizes, vec![1, 2]); + + // Drive only the replayed single-HTLC event on-chain. A preimage replay + // before its CSV-delayed output is final may create a live conflicting + // claim, so the final replay assertion below waits for the monitor's + // persisted resolution state instead. + for event in events { + if let Event::BumpTransaction(event) = event { + let is_single_htlc = if let BumpTransactionEvent::HTLCResolution { + ref htlc_descriptors, + .. + } = event + { + htlc_descriptors.len() == 1 + } else { + false + }; + if is_single_htlc { + nodes[0].bump_tx_handler.handle_event(&event); + break; + } + } + } + let mut htlc_txn = nodes[0].tx_broadcaster.unique_txn_broadcast(); + assert_eq!(htlc_txn.len(), 1); + let htlc_tx = htlc_txn.pop().unwrap(); + mine_transaction(&nodes[0], &htlc_tx); + connect_blocks(&nodes[0], ANTI_REORG_DELAY - 1); + assert!(nodes[0].chain_monitor.chain_monitor.get_and_clear_pending_events().is_empty()); + + // The spend has passed OnchainTxHandler's anti-reorg cleanup, but its + // CSV-delayed output is not yet final according to the monitor. Replaying + // the preimage in this window creates a live conflicting claim, which is + // kept as retry state in case the spend reorgs out. 
+ get_monitor!(nodes[0], chan_id).provide_payment_preimage_unsafe_legacy( + &claim_hash, + &claim_preimage, + &node_cfgs[0].tx_broadcaster, + &LowerBoundedFeeEstimator::new(node_cfgs[0].fee_estimator), + &nodes[0].logger, + ); + let live_conflict_events = nodes[0] + .chain_monitor + .chain_monitor + .get_and_clear_pending_events() + .into_iter() + .collect::<Vec<_>>(); + let mut live_conflict_htlc_event_sizes = live_conflict_events + .iter() + .filter_map(|event| { + if let Event::BumpTransaction(BumpTransactionEvent::HTLCResolution { + htlc_descriptors, .. + }) = event + { + Some(htlc_descriptors.len()) + } else { + None + } + }) + .collect::<Vec<_>>(); + live_conflict_htlc_event_sizes.sort_unstable(); + assert_eq!(live_conflict_htlc_event_sizes, vec![1]); + + connect_blocks(&nodes[0], BREAKDOWN_TIMEOUT as u32 - ANTI_REORG_DELAY); + let _ = nodes[0].chain_monitor.chain_monitor.get_and_clear_pending_events(); + + // Reload before replaying the preimage so the regression test covers + // persisted resolution state, not only in-memory filtering. + let serialized_channel_manager = nodes[0].node.encode(); + let serialized_monitor = get_monitor!(nodes[0], chan_id).encode(); + reload_node!( + nodes[0], &serialized_channel_manager, &[&serialized_monitor], persister, + new_chain_monitor, node_deserialized + ); + + // Replaying the preimage update must not regenerate a claim for the HTLC + // whose commitment output has final persisted resolution state. 
+ get_monitor!(nodes[0], chan_id).provide_payment_preimage_unsafe_legacy( + &claim_hash, &claim_preimage, &node_cfgs[0].tx_broadcaster, + &LowerBoundedFeeEstimator::new(node_cfgs[0].fee_estimator), &nodes[0].logger, + ); + assert!(nodes[0].chain_monitor.chain_monitor.get_and_clear_pending_events().is_empty()); + expect_payment_claimed!(nodes[0], claim_hash, 12_000_000); + check_added_monitors(&nodes[0], 1); } #[test] From 1c3b725edf24131c276da524b4918f8b069c2065 Mon Sep 17 00:00:00 2001 From: Joost Jager Date: Thu, 30 Apr 2026 14:33:27 +0200 Subject: [PATCH 6/6] lightning: canonicalize htlc claim ids Hash HTLC claim outpoints in canonical order so the same logical HTLC set produces the same ClaimId regardless of descriptor order. Add a unit test covering reversed descriptor order. --- lightning/src/chain/mod.rs | 64 ++++++++++++++++++++++++++++++++++++-- 1 file changed, 61 insertions(+), 3 deletions(-) diff --git a/lightning/src/chain/mod.rs b/lightning/src/chain/mod.rs index d72d58b3149..5ff96e46953 100644 --- a/lightning/src/chain/mod.rs +++ b/lightning/src/chain/mod.rs @@ -563,10 +563,18 @@ pub struct ClaimId(pub [u8; 32]); impl ClaimId { pub(crate) fn from_htlcs(htlcs: &[HTLCDescriptor]) -> ClaimId { + let mut htlc_outpoints = htlcs + .iter() + .map(|htlc| { + (htlc.commitment_txid.to_byte_array(), htlc.htlc.transaction_output_index.unwrap()) + }) + .collect::<Vec<_>>(); + htlc_outpoints.sort_unstable(); + let mut engine = Sha256::engine(); - for htlc in htlcs { - engine.input(&htlc.commitment_txid.to_byte_array()); - engine.input(&htlc.htlc.transaction_output_index.unwrap().to_be_bytes()); + for (commitment_txid, transaction_output_index) in htlc_outpoints { + engine.input(&commitment_txid); + engine.input(&transaction_output_index.to_be_bytes()); } ClaimId(Sha256::from_engine(engine).to_byte_array()) } @@ -581,8 +589,45 @@ impl ClaimId { #[cfg(test)] mod tests { use super::*; + use crate::ln::chan_utils::{ + ChannelTransactionParameters, HTLCOutputInCommitment, 
HolderCommitmentTransaction, + }; + use crate::sign::ChannelDerivationParameters; + use crate::types::payment::{PaymentHash, PaymentPreimage}; use bitcoin::hashes::Hash; + fn dummy_htlc_descriptor( + commitment_txid: Txid, transaction_output_index: u32, + ) -> HTLCDescriptor { + let channel_parameters = ChannelTransactionParameters::test_dummy(100_000); + let htlc = HTLCOutputInCommitment { + offered: true, + amount_msat: 1000, + cltv_expiry: 100, + payment_hash: PaymentHash::from(PaymentPreimage([1; 32])), + transaction_output_index: Some(transaction_output_index), + }; + let funding_outpoint = channel_parameters.funding_outpoint.unwrap(); + let commitment_tx = + HolderCommitmentTransaction::dummy(100_000, funding_outpoint, vec![htlc.clone()]); + let trusted_tx = commitment_tx.trust(); + + HTLCDescriptor { + channel_derivation_parameters: ChannelDerivationParameters { + value_satoshis: channel_parameters.channel_value_satoshis, + keys_id: [1; 32], + transaction_parameters: channel_parameters, + }, + commitment_txid, + per_commitment_number: trusted_tx.commitment_number(), + per_commitment_point: trusted_tx.per_commitment_point(), + feerate_per_kw: trusted_tx.negotiated_feerate_per_kw(), + htlc, + preimage: None, + counterparty_sig: commitment_tx.counterparty_htlc_sigs[0], + } + } + #[test] fn test_best_block() { let hash1 = BlockHash::from_slice(&[1; 32]).unwrap(); @@ -618,4 +663,17 @@ mod tests { let chain_c = BlockLocator::new(hash_other, 200); assert_eq!(chain_a.find_common_ancestor(&chain_c), None); } + + #[test] + fn test_htlc_claim_id_is_descriptor_order_independent() { + // Use opposite txid and vout ordering so the assertion would fail if + // ClaimId still hashed descriptors in caller-provided order. 
+ let first = dummy_htlc_descriptor(Txid::from_slice(&[1; 32]).unwrap(), 2); + let second = dummy_htlc_descriptor(Txid::from_slice(&[2; 32]).unwrap(), 1); + + assert_eq!( + ClaimId::from_htlcs(&[first.clone(), second.clone()]), + ClaimId::from_htlcs(&[second, first]) + ); + } }