From ea771caa4f8ef04ea2bee95f7b8f46e7d1625437 Mon Sep 17 00:00:00 2001 From: Roberto De Ioris Date: Wed, 22 Jan 2025 16:22:26 +0100 Subject: [PATCH 01/59] added heuristic for block rejections timeout --- .../src/nakamoto_node/signer_coordinator.rs | 43 +++++++++++++----- .../src/nakamoto_node/stackerdb_listener.rs | 45 +++++++++---------- testnet/stacks-node/src/tests/signer/v0.rs | 45 +++++++++++++++++++ 3 files changed, 98 insertions(+), 35 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs index f3df78c66b..d409e73445 100644 --- a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs @@ -16,6 +16,7 @@ use std::sync::atomic::AtomicBool; use std::sync::{Arc, Mutex}; use std::thread::JoinHandle; +use std::time::Instant; use libsigner::v0::messages::{MinerSlotID, SignerMessage as SignerMessageV0}; use libsigner::{BlockProposal, SignerSession, StackerDBSession}; @@ -37,7 +38,9 @@ use stacks::util_lib::boot::boot_code_id; use super::stackerdb_listener::StackerDBListenerComms; use super::Error as NakamotoNodeError; use crate::event_dispatcher::StackerDBChannel; -use crate::nakamoto_node::stackerdb_listener::{StackerDBListener, EVENT_RECEIVER_POLL}; +use crate::nakamoto_node::stackerdb_listener::{ + BlockStatus, StackerDBListener, EVENT_RECEIVER_POLL, +}; use crate::neon::Counters; use crate::Config; @@ -270,17 +273,19 @@ impl SignerCoordinator { burn_tip: &BlockSnapshot, counters: &Counters, ) -> Result, NakamotoNodeError> { + let mut rejections_timer = Instant::now(); + let mut rejections: u32 = 0; + let mut rejections_timeout = core::time::Duration::from_secs(600); + let mut block_status_tracker = BlockStatus::default(); loop { + /// + /// TODO: describe the logic let block_status = match self.stackerdb_comms.wait_for_block_status( block_signer_sighash, + &mut block_status_tracker, + rejections_timer, + rejections_timeout, EVENT_RECEIVER_POLL, - |status| { - status.total_weight_signed < self.weight_threshold - && status - .total_reject_weight - .saturating_add(self.weight_threshold) - <= self.total_weight - }, )? 
{ Some(status) => status, None => { @@ -313,10 +318,26 @@ impl SignerCoordinator { return Err(NakamotoNodeError::BurnchainTipChanged); } + if rejections_timer.elapsed() > rejections_timeout { + return Err(NakamotoNodeError::SigningCoordinatorFailure( + "Gave up while tried reaching the threshold".into(), + )); + } + continue; } }; + if rejections != block_status.total_reject_weight { + rejections_timer = Instant::now(); + rejections = block_status.total_reject_weight; + rejections_timeout = core::time::Duration::from_secs_f32( + 600 as f32 + - (600 as f32 + * ((rejections as f32 / self.weight_threshold as f32).powf(2.0))), + ); + } + if block_status .total_reject_weight .saturating_add(self.weight_threshold) @@ -334,10 +355,12 @@ impl SignerCoordinator { "block_signer_sighash" => %block_signer_sighash, ); return Ok(block_status.gathered_signatures.values().cloned().collect()); - } else { + } else if rejections_timer.elapsed() > rejections_timeout { return Err(NakamotoNodeError::SigningCoordinatorFailure( - "Unblocked without reaching the threshold".into(), + "Gave up while tried reaching the threshold".into(), )); + } else { + continue; } } } diff --git a/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs b/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs index 834c59fa95..92688c0075 100644 --- a/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs +++ b/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs @@ -50,7 +50,7 @@ pub static TEST_IGNORE_SIGNERS: LazyLock> = LazyLock::new(TestFla /// waking up to check timeouts? pub static EVENT_RECEIVER_POLL: Duration = Duration::from_millis(500); -#[derive(Debug, Clone)] +#[derive(Debug, Clone, Default, PartialEq)] pub struct BlockStatus { pub responded_signers: HashSet, pub gathered_signatures: BTreeMap, @@ -337,10 +337,8 @@ impl StackerDBListener { block.gathered_signatures.insert(slot_id, signature); block.responded_signers.insert(signer_pubkey); - if block.total_weight_signed >= self.weight_threshold { - // Signal to anyone waiting on this block that we have enough signatures - cvar.notify_all(); - } + // Signal to anyone waiting on this block that we have a new status + cvar.notify_all(); // Update the idle timestamp for this signer self.update_idle_timestamp( @@ -378,6 +376,7 @@ impl StackerDBListener { } }; block.responded_signers.insert(rejected_pubkey); + block.total_reject_weight = block .total_reject_weight .checked_add(signer_entry.weight) @@ -396,14 +395,8 @@ impl StackerDBListener { "server_version" => rejected_data.metadata.server_version, ); - if block - .total_reject_weight - .saturating_add(self.weight_threshold) - > self.total_weight - { - // Signal to anyone waiting on this block that we have enough rejections - cvar.notify_all(); - } + // Signal to anyone waiting on this block that we have a new status + cvar.notify_all(); // Update the idle timestamp for this signer self.update_idle_timestamp( @@ -487,30 +480,32 @@ impl StackerDBListenerComms { /// Get the status for `block` from the Stacker DB listener. /// If the block is not found in the map, return an error. - /// If the block is found, call `condition` to check if the block status - /// satisfies the condition. - /// If the condition is satisfied, return the block status as - /// `Ok(Some(status))`. - /// If the condition is not satisfied, wait for it to be satisfied. + /// If the block is found, return it. /// If the timeout is reached, return `Ok(None)`. 
- pub fn wait_for_block_status( + pub fn wait_for_block_status( &self, block_signer_sighash: &Sha512Trunc256Sum, + block_status_tracker: &mut BlockStatus, + rejections_timer: std::time::Instant, + rejections_timeout: Duration, timeout: Duration, - condition: F, - ) -> Result, NakamotoNodeError> - where - F: Fn(&BlockStatus) -> bool, - { + ) -> Result, NakamotoNodeError> { let (lock, cvar) = &*self.blocks; let blocks = lock.lock().expect("FATAL: failed to lock block status"); let (guard, timeout_result) = cvar .wait_timeout_while(blocks, timeout, |map| { + if rejections_timer.elapsed() > rejections_timeout { + return true; + } let Some(status) = map.get(block_signer_sighash) else { return true; }; - condition(status) + if status != block_status_tracker { + *block_status_tracker = status.clone(); + return false; + } + return true; }) .expect("FATAL: failed to wait on block status cond var"); diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 190145279f..5b92bd47f7 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -7733,6 +7733,51 @@ fn block_validation_response_timeout() { ); } +// Ensures that a signer that successfully submits a block to the node for validation +// will issue ConnectivityIssues rejections if a block submission times out. +// Also ensures that no other proposal gets submitted for validation if we +// are already waiting for a block submission response. +#[test] +#[ignore] +fn block_validation_check_rejection_timeout_heuristic() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + info!("------------------------- Test Setup -------------------------"); + let num_signers = 5; + let timeout = Duration::from_secs(30); + let sender_sk = Secp256k1PrivateKey::new(); + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 100; + let send_fee = 180; + + let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( + num_signers, + vec![(sender_addr, send_amt + send_fee)], + |config| { + config.block_proposal_validation_timeout = timeout; + }, + |_| {}, + None, + None, + ); + + let all_signers: Vec<_> = signer_test + .signer_stacks_private_keys + .iter() + .map(StacksPublicKey::from_private) + .collect(); + + signer_test.boot_to_epoch_3(); + + TEST_REJECT_ALL_BLOCK_PROPOSAL.set(vec![all_signers[3], all_signers[4]]); + TEST_IGNORE_ALL_BLOCK_PROPOSALS.set(vec![all_signers[0], all_signers[1], all_signers[2]]); + + info!("------------------------- Test Mine and Verify Confirmed Nakamoto Block -------------------------"); + signer_test.mine_and_verify_confirmed_naka_block(timeout, num_signers, true); +} + /// Test scenario: /// /// - when a signer submits a block validation request and From 6ab756b201a064fcbf3a0bb824a605187e34e894 Mon Sep 17 00:00:00 2001 From: Roberto De Ioris Date: Wed, 22 Jan 2025 16:33:14 +0100 Subject: [PATCH 02/59] added rejections_timeout test exposure via BLOCK_REJECTIONS_CURRENT_TIMEOUT --- .../src/nakamoto_node/signer_coordinator.rs | 28 +++++++++++++++++-- 1 file changed, 25 insertions(+), 3 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs index 146e767b24..f018852e9e 100644 --- a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs @@ -44,6 +44,24 @@ use crate::nakamoto_node::stackerdb_listener::{ use crate::neon::Counters; use 
crate::Config; +#[cfg(test)] +use std::time::Duration; + +#[cfg(test)] +use stacks_common::util::tests::TestFlag; + +#[cfg(test)] +use std::sync::LazyLock; + +#[cfg(test)] +/// Test-only value for storing the current rejection based timeout +/// Used to test that the signers will broadcast a block if it gets enough signatures +pub static BLOCK_REJECTIONS_CURRENT_TIMEOUT: LazyLock> = + LazyLock::new(TestFlag::default); + +/// Base timeout for rejections heuristic +pub static BLOCK_REJECTIONS_TIMEOUT_BASE: u64 = 600; + /// The state of the signer database listener, used by the miner thread to /// interact with the signer listener. pub struct SignerCoordinator { @@ -298,11 +316,12 @@ impl SignerCoordinator { ) -> Result, NakamotoNodeError> { let mut rejections_timer = Instant::now(); let mut rejections: u32 = 0; - let mut rejections_timeout = core::time::Duration::from_secs(600); + let mut rejections_timeout = core::time::Duration::from_secs(BLOCK_REJECTIONS_TIMEOUT_BASE); let mut block_status_tracker = BlockStatus::default(); loop { /// /// TODO: describe the logic + /// let block_status = match self.stackerdb_comms.wait_for_block_status( block_signer_sighash, &mut block_status_tracker, @@ -355,10 +374,13 @@ impl SignerCoordinator { rejections_timer = Instant::now(); rejections = block_status.total_reject_weight; rejections_timeout = core::time::Duration::from_secs_f32( - 600 as f32 - - (600 as f32 + BLOCK_REJECTIONS_TIMEOUT_BASE as f32 + - (BLOCK_REJECTIONS_TIMEOUT_BASE as f32 * ((rejections as f32 / self.weight_threshold as f32).powf(2.0))), ); + + #[cfg(test)] + BLOCK_REJECTIONS_CURRENT_TIMEOUT.set(rejections_timeout); } if block_status From bfd47939cce6cab5277c307497fc649e6ed47f53 Mon Sep 17 00:00:00 2001 From: Roberto De Ioris Date: Wed, 22 Jan 2025 16:55:05 +0100 Subject: [PATCH 03/59] improved test logic --- testnet/stacks-node/src/tests/signer/v0.rs | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index c6b2367290..345e9d8e90 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -75,6 +75,7 @@ use crate::event_dispatcher::{MinedNakamotoBlockEvent, TEST_SKIP_BLOCK_ANNOUNCEM use crate::nakamoto_node::miner::{ TEST_BLOCK_ANNOUNCE_STALL, TEST_BROADCAST_STALL, TEST_MINE_STALL, }; +use crate::nakamoto_node::signer_coordinator::BLOCK_REJECTIONS_CURRENT_TIMEOUT; use crate::nakamoto_node::stackerdb_listener::TEST_IGNORE_SIGNERS; use crate::neon::Counters; use crate::run_loop::boot_nakamoto; @@ -7842,8 +7843,13 @@ fn block_validation_check_rejection_timeout_heuristic() { TEST_REJECT_ALL_BLOCK_PROPOSAL.set(vec![all_signers[3], all_signers[4]]); TEST_IGNORE_ALL_BLOCK_PROPOSALS.set(vec![all_signers[0], all_signers[1], all_signers[2]]); - info!("------------------------- Test Mine and Verify Confirmed Nakamoto Block -------------------------"); - signer_test.mine_and_verify_confirmed_naka_block(timeout, num_signers, true); + info!("------------------------- Test Mine and Verify Rejected Nakamoto Block -------------------------"); + signer_test.mine_nakamoto_block(timeout, true); + signer_test + .wait_for_block_rejections(timeout.as_secs(), &[all_signers[3], all_signers[4]]) + .unwrap(); + + assert_eq!(BLOCK_REJECTIONS_CURRENT_TIMEOUT.get().as_secs(), 400); } /// Test scenario: From 3cebb352e2ff379491b08e5b2077be0b4664da8c Mon Sep 17 00:00:00 2001 From: Roberto De Ioris Date: Wed, 22 Jan 2025 17:57:24 +0100 Subject: [PATCH 04/59] 
fixed integration test --- testnet/stacks-node/src/tests/signer/v0.rs | 46 ++++++++++++++++++++-- 1 file changed, 43 insertions(+), 3 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 345e9d8e90..005c09c034 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -7840,16 +7840,56 @@ fn block_validation_check_rejection_timeout_heuristic() { signer_test.boot_to_epoch_3(); + // note we just use mined nakamoto_blocks as the second block is not going to be confirmed + + info!("------------------------- Check Rejections-based timeout with 1 rejection -------------------------"); + + let blocks_before = test_observer::get_mined_nakamoto_blocks().len(); + + TEST_REJECT_ALL_BLOCK_PROPOSAL.set(vec![all_signers[4]]); + TEST_IGNORE_ALL_BLOCK_PROPOSALS.set(vec![ + all_signers[0], + all_signers[1], + all_signers[2], + all_signers[3], + ]); + + next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + 30, + || Ok(test_observer::get_mined_nakamoto_blocks().len() > blocks_before), + ) + .unwrap(); + + signer_test + .wait_for_block_rejections(timeout.as_secs(), &[all_signers[4]]) + .unwrap(); + + assert_eq!(BLOCK_REJECTIONS_CURRENT_TIMEOUT.get().as_secs(), 551); + + info!("------------------------- Check Rejections-based timeout with 2 rejections -------------------------"); + + let blocks_before = test_observer::get_mined_nakamoto_blocks().len(); + TEST_REJECT_ALL_BLOCK_PROPOSAL.set(vec![all_signers[3], all_signers[4]]); TEST_IGNORE_ALL_BLOCK_PROPOSALS.set(vec![all_signers[0], all_signers[1], all_signers[2]]); - info!("------------------------- Test Mine and Verify Rejected Nakamoto Block -------------------------"); - signer_test.mine_nakamoto_block(timeout, true); + next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + 30, + || Ok(test_observer::get_mined_nakamoto_blocks().len() > blocks_before), + ) + .unwrap(); + signer_test .wait_for_block_rejections(timeout.as_secs(), &[all_signers[3], all_signers[4]]) .unwrap(); - assert_eq!(BLOCK_REJECTIONS_CURRENT_TIMEOUT.get().as_secs(), 400); + assert_eq!(BLOCK_REJECTIONS_CURRENT_TIMEOUT.get().as_secs(), 404); + + // reset reject/ignore + TEST_REJECT_ALL_BLOCK_PROPOSAL.set(vec![]); + TEST_IGNORE_ALL_BLOCK_PROPOSALS.set(vec![]); } /// Test scenario: From c3e163e6171934ec0519352d59189c861354957b Mon Sep 17 00:00:00 2001 From: Roberto De Ioris Date: Wed, 22 Jan 2025 19:17:28 +0100 Subject: [PATCH 05/59] fixed comment for BLOCK_REJECTIONS_CURRENT_TIMEOUT --- testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs index f018852e9e..757d5943d3 100644 --- a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs @@ -55,7 +55,6 @@ use std::sync::LazyLock; #[cfg(test)] /// Test-only value for storing the current rejection based timeout -/// Used to test that the signers will broadcast a block if it gets enough signatures pub static BLOCK_REJECTIONS_CURRENT_TIMEOUT: LazyLock> = LazyLock::new(TestFlag::default); From 6b751c406843ed16709b93cf239fe1dd70f91a1d Mon Sep 17 00:00:00 2001 From: Roberto De Ioris Date: Wed, 22 Jan 2025 19:19:35 +0100 Subject: [PATCH 06/59] fixed test comment --- testnet/stacks-node/src/tests/signer/v0.rs | 6 ++---- 1 file changed, 2 insertions(+), 4 
deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 30b1ffbe52..ddde8bd5da 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -7803,10 +7803,8 @@ fn block_validation_response_timeout() { ); } -// Ensures that a signer that successfully submits a block to the node for validation -// will issue ConnectivityIssues rejections if a block submission times out. -// Also ensures that no other proposal gets submitted for validation if we -// are already waiting for a block submission response. +// Verify that the miner timeout while waiting for signers will change accordingly +// to rejections. #[test] #[ignore] fn block_validation_check_rejection_timeout_heuristic() { From bb8df99742cf37723d3858bcc581d822f6c41b7a Mon Sep 17 00:00:00 2001 From: Roberto De Ioris Date: Wed, 22 Jan 2025 19:23:19 +0100 Subject: [PATCH 07/59] fmt --- .../src/nakamoto_node/signer_coordinator.rs | 21 ++++++++----------- 1 file changed, 9 insertions(+), 12 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs index 757d5943d3..4aabe006e6 100644 --- a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs @@ -14,8 +14,12 @@ // along with this program. If not, see . use std::sync::atomic::AtomicBool; +#[cfg(test)] +use std::sync::LazyLock; use std::sync::{Arc, Mutex}; use std::thread::JoinHandle; +#[cfg(test)] +use std::time::Duration; use std::time::Instant; use libsigner::v0::messages::{MinerSlotID, SignerMessage as SignerMessageV0}; @@ -34,6 +38,8 @@ use stacks::types::chainstate::{StacksBlockId, StacksPrivateKey}; use stacks::util::hash::Sha512Trunc256Sum; use stacks::util::secp256k1::MessageSignature; use stacks::util_lib::boot::boot_code_id; +#[cfg(test)] +use stacks_common::util::tests::TestFlag; use super::stackerdb_listener::StackerDBListenerComms; use super::Error as NakamotoNodeError; @@ -44,15 +50,6 @@ use crate::nakamoto_node::stackerdb_listener::{ use crate::neon::Counters; use crate::Config; -#[cfg(test)] -use std::time::Duration; - -#[cfg(test)] -use stacks_common::util::tests::TestFlag; - -#[cfg(test)] -use std::sync::LazyLock; - #[cfg(test)] /// Test-only value for storing the current rejection based timeout pub static BLOCK_REJECTIONS_CURRENT_TIMEOUT: LazyLock> = @@ -318,9 +315,9 @@ impl SignerCoordinator { let mut rejections_timeout = core::time::Duration::from_secs(BLOCK_REJECTIONS_TIMEOUT_BASE); let mut block_status_tracker = BlockStatus::default(); loop { - /// - /// TODO: describe the logic - /// + // At every iteration wait for the block_status. + // Exit when the amount of confirmations/rejections reach the threshold (or until timeout) + // Based on the amount of rejections, eventually modify the timeout. 
let block_status = match self.stackerdb_comms.wait_for_block_status( block_signer_sighash, &mut block_status_tracker, From 280483b7555ae482cfdec75293e0465a1ad5ad9e Mon Sep 17 00:00:00 2001 From: Roberto De Ioris Date: Wed, 22 Jan 2025 19:26:50 +0100 Subject: [PATCH 08/59] added more comments --- testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs index 4aabe006e6..f3044de972 100644 --- a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs @@ -310,13 +310,17 @@ impl SignerCoordinator { sortdb: &SortitionDB, counters: &Counters, ) -> Result, NakamotoNodeError> { + // this is used to track the start of the waiting cycle let mut rejections_timer = Instant::now(); + // the amount of current rejections to eventually modify the timeout let mut rejections: u32 = 0; + // default timeout let mut rejections_timeout = core::time::Duration::from_secs(BLOCK_REJECTIONS_TIMEOUT_BASE); + // this is used for comparing block_status to identify if it has been changed from the previous event let mut block_status_tracker = BlockStatus::default(); loop { // At every iteration wait for the block_status. - // Exit when the amount of confirmations/rejections reach the threshold (or until timeout) + // Exit when the amount of confirmations/rejections reaches the threshold (or until timeout) // Based on the amount of rejections, eventually modify the timeout. let block_status = match self.stackerdb_comms.wait_for_block_status( block_signer_sighash, From 05b3f92c6c96ff57e4d05c5caded32885037dbf5 Mon Sep 17 00:00:00 2001 From: Roberto De Ioris Date: Thu, 23 Jan 2025 15:51:08 +0100 Subject: [PATCH 09/59] fixed formatting --- .../stacks-node/src/nakamoto_node/signer_coordinator.rs | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs index f3044de972..818f1f6a08 100644 --- a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs @@ -18,9 +18,7 @@ use std::sync::atomic::AtomicBool; use std::sync::LazyLock; use std::sync::{Arc, Mutex}; use std::thread::JoinHandle; -#[cfg(test)] -use std::time::Duration; -use std::time::Instant; +use std::time::{Duration, Instant}; use libsigner::v0::messages::{MinerSlotID, SignerMessage as SignerMessageV0}; use libsigner::{BlockProposal, SignerSession, StackerDBSession}; @@ -315,7 +313,7 @@ impl SignerCoordinator { // the amount of current rejections to eventually modify the timeout let mut rejections: u32 = 0; // default timeout - let mut rejections_timeout = core::time::Duration::from_secs(BLOCK_REJECTIONS_TIMEOUT_BASE); + let mut rejections_timeout = Duration::from_secs(BLOCK_REJECTIONS_TIMEOUT_BASE); // this is used for comparing block_status to identify if it has been changed from the previous event let mut block_status_tracker = BlockStatus::default(); loop { @@ -373,7 +371,7 @@ impl SignerCoordinator { if rejections != block_status.total_reject_weight { rejections_timer = Instant::now(); rejections = block_status.total_reject_weight; - rejections_timeout = core::time::Duration::from_secs_f32( + rejections_timeout = Duration::from_secs_f32( BLOCK_REJECTIONS_TIMEOUT_BASE as f32 - 
(BLOCK_REJECTIONS_TIMEOUT_BASE as f32 * ((rejections as f32 / self.weight_threshold as f32).powf(2.0))), From 2f2cb53f66291d8ee46b2837f97bf5b97df82bfa Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Thu, 23 Jan 2025 13:31:06 -0500 Subject: [PATCH 10/59] chore: Apply Clippy lint `redundant_pattern_matching` --- stackslib/src/burnchains/db.rs | 3 ++- stackslib/src/chainstate/nakamoto/mod.rs | 2 +- stackslib/src/chainstate/stacks/boot/contract_tests.rs | 2 +- stackslib/src/chainstate/stacks/db/blocks.rs | 2 +- stackslib/src/chainstate/stacks/db/mod.rs | 5 +---- stackslib/src/chainstate/stacks/index/file.rs | 2 +- stackslib/src/chainstate/stacks/index/test/marf.rs | 2 +- stackslib/src/clarity_cli.rs | 8 ++++---- stackslib/src/clarity_vm/database/mod.rs | 6 ++++-- stackslib/src/main.rs | 2 +- stackslib/src/net/tests/inv/nakamoto.rs | 2 +- 11 files changed, 18 insertions(+), 18 deletions(-) diff --git a/stackslib/src/burnchains/db.rs b/stackslib/src/burnchains/db.rs index 2571c532ed..47af6b3ae6 100644 --- a/stackslib/src/burnchains/db.rs +++ b/stackslib/src/burnchains/db.rs @@ -1193,9 +1193,10 @@ impl BurnchainDB { let ops: Vec = query_rows(&self.conn, qry, args).expect("FATAL: burnchain DB query error"); for op in ops { - if let Some(_) = indexer + if indexer .find_burnchain_header_height(&op.burn_header_hash()) .expect("FATAL: burnchain DB query error") + .is_some() { // this is the op on the canonical fork return Some(op); diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index 7584af67d3..c15c48e3fd 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -2476,7 +2476,7 @@ impl NakamotoChainState { ) -> Result { test_debug!("Consider Nakamoto block {}", &block.block_id()); // do nothing if we already have this block - if let Some(_) = Self::get_block_header(headers_conn, &block.header.block_id())? 
{ + if Self::get_block_header(headers_conn, &block.header.block_id())?.is_some() { debug!("Already have block {}", &block.header.block_id()); return Ok(false); } diff --git a/stackslib/src/chainstate/stacks/boot/contract_tests.rs b/stackslib/src/chainstate/stacks/boot/contract_tests.rs index 2fb95a5ace..11cbc1fd46 100644 --- a/stackslib/src/chainstate/stacks/boot/contract_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/contract_tests.rs @@ -486,7 +486,7 @@ impl BurnStateDB for TestSimBurnStateDB { height: u32, sortition_id: &SortitionId, ) -> Option<(Vec, u128)> { - if let Some(_) = self.get_burn_header_hash(height, sortition_id) { + if self.get_burn_header_hash(height, sortition_id).is_some() { let first_block = self.get_burn_start_height(); let prepare_len = self.get_pox_prepare_length(); let rc_len = self.get_pox_reward_cycle_length(); diff --git a/stackslib/src/chainstate/stacks/db/blocks.rs b/stackslib/src/chainstate/stacks/db/blocks.rs index 30b38c10cc..d3a4dc8a5a 100644 --- a/stackslib/src/chainstate/stacks/db/blocks.rs +++ b/stackslib/src/chainstate/stacks/db/blocks.rs @@ -5147,7 +5147,7 @@ impl StacksChainState { ) { Ok(miner_rewards_opt) => miner_rewards_opt, Err(e) => { - if let Some(_) = miner_id_opt { + if miner_id_opt.is_some() { return Err(e); } else { let msg = format!("Failed to load miner rewards: {:?}", &e); diff --git a/stackslib/src/chainstate/stacks/db/mod.rs b/stackslib/src/chainstate/stacks/db/mod.rs index 6853ec0ee9..c09b2fcbab 100644 --- a/stackslib/src/chainstate/stacks/db/mod.rs +++ b/stackslib/src/chainstate/stacks/db/mod.rs @@ -1839,10 +1839,7 @@ impl StacksChainState { let nakamoto_staging_blocks_conn = StacksChainState::open_nakamoto_staging_blocks(&nakamoto_staging_blocks_path, true)?; - let init_required = match fs::metadata(&clarity_state_index_marf) { - Ok(_) => false, - Err(_) => true, - }; + let init_required = fs::metadata(&clarity_state_index_marf).is_err(); let state_index = StacksChainState::open_db(mainnet, chain_id, &header_index_root)?; diff --git a/stackslib/src/chainstate/stacks/index/file.rs b/stackslib/src/chainstate/stacks/index/file.rs index 5a7da69e52..3940cb594e 100644 --- a/stackslib/src/chainstate/stacks/index/file.rs +++ b/stackslib/src/chainstate/stacks/index/file.rs @@ -213,7 +213,7 @@ impl TrieFile { let mut set_sqlite_tmpdir = false; let mut old_tmpdir_opt = None; if let Some(parent_path) = Path::new(db_path).parent() { - if let Err(_) = env::var("SQLITE_TMPDIR") { + if env::var("SQLITE_TMPDIR").is_err() { debug!( "Sqlite will store temporary migration state in '{}'", parent_path.display() diff --git a/stackslib/src/chainstate/stacks/index/test/marf.rs b/stackslib/src/chainstate/stacks/index/test/marf.rs index 63b2b58968..4f2b06a480 100644 --- a/stackslib/src/chainstate/stacks/index/test/marf.rs +++ b/stackslib/src/chainstate/stacks/index/test/marf.rs @@ -2190,7 +2190,7 @@ fn test_marf_begin_from_sentinel_twice() { #[test] fn test_marf_unconfirmed() { let marf_path = "/tmp/test_marf_unconfirmed"; - if let Ok(_) = std::fs::metadata(marf_path) { + if std::fs::metadata(marf_path).is_ok() { std::fs::remove_file(marf_path).unwrap(); } let marf_opts = MARFOpenOpts::default(); diff --git a/stackslib/src/clarity_cli.rs b/stackslib/src/clarity_cli.rs index 9862281b6c..a580e90ee9 100644 --- a/stackslib/src/clarity_cli.rs +++ b/stackslib/src/clarity_cli.rs @@ -645,7 +645,7 @@ impl HeadersDB for CLIHeadersDB { ) -> Option { // mock it let conn = self.conn(); - if let Some(_) = get_cli_block_height(&conn, id_bhh) { + if 
get_cli_block_height(&conn, id_bhh).is_some() { let hash_bytes = Sha512Trunc256Sum::from_data(&id_bhh.0); Some(BurnchainHeaderHash(hash_bytes.0)) } else { @@ -660,7 +660,7 @@ impl HeadersDB for CLIHeadersDB { ) -> Option { // mock it let conn = self.conn(); - if let Some(_) = get_cli_block_height(&conn, id_bhh) { + if get_cli_block_height(&conn, id_bhh).is_some() { let hash_bytes = Hash160::from_data(&id_bhh.0); Some(ConsensusHash(hash_bytes.0)) } else { @@ -674,7 +674,7 @@ impl HeadersDB for CLIHeadersDB { _epoch: &StacksEpochId, ) -> Option { let conn = self.conn(); - if let Some(_) = get_cli_block_height(&conn, id_bhh) { + if get_cli_block_height(&conn, id_bhh).is_some() { // mock it, but make it unique let hash_bytes = Sha512Trunc256Sum::from_data(&id_bhh.0); let hash_bytes_2 = Sha512Trunc256Sum::from_data(&hash_bytes.0); @@ -690,7 +690,7 @@ impl HeadersDB for CLIHeadersDB { _epoch: &StacksEpochId, ) -> Option { let conn = self.conn(); - if let Some(_) = get_cli_block_height(&conn, id_bhh) { + if get_cli_block_height(&conn, id_bhh).is_some() { // mock it, but make it unique let hash_bytes = Sha512Trunc256Sum::from_data(&id_bhh.0); let hash_bytes_2 = Sha512Trunc256Sum::from_data(&hash_bytes.0); diff --git a/stackslib/src/clarity_vm/database/mod.rs b/stackslib/src/clarity_vm/database/mod.rs index 6f770f5927..e03149dba4 100644 --- a/stackslib/src/clarity_vm/database/mod.rs +++ b/stackslib/src/clarity_vm/database/mod.rs @@ -737,13 +737,15 @@ fn get_first_block_in_tenure( } } None => { - if let Some(_) = get_stacks_header_column_from_table( + if get_stacks_header_column_from_table( conn.conn(), id_bhh, "consensus_hash", &|r| ConsensusHash::from_row(r).expect("FATAL: malformed consensus_hash"), false, - ) { + ) + .is_some() + { return id_bhh.clone().into(); } else { get_stacks_header_column_from_table( diff --git a/stackslib/src/main.rs b/stackslib/src/main.rs index 2e63d0d128..90f6dfeecd 100644 --- a/stackslib/src/main.rs +++ b/stackslib/src/main.rs @@ -1523,7 +1523,7 @@ check if the associated microblocks can be downloaded while next_arrival < stacks_blocks_arrival_order.len() && known_stacks_blocks.contains(&stacks_block_id) { - if let Some(_) = stacks_blocks_available.get(&stacks_block_id) { + if stacks_blocks_available.get(&stacks_block_id).is_some() { // load up the block let stacks_block_opt = StacksChainState::load_block( &old_chainstate.blocks_path, diff --git a/stackslib/src/net/tests/inv/nakamoto.rs b/stackslib/src/net/tests/inv/nakamoto.rs index cb09236ccb..c313ede598 100644 --- a/stackslib/src/net/tests/inv/nakamoto.rs +++ b/stackslib/src/net/tests/inv/nakamoto.rs @@ -146,7 +146,7 @@ pub fn peer_get_nakamoto_invs<'a>( loop { peer.step_with_ibd(false).unwrap(); - if let Ok(..) 
= shutdown_recv.try_recv() { + if shutdown_recv.try_recv().is_ok() { break; } } From da64ceca3f2562e07ce6484e508dbd5c2de0650f Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Thu, 23 Jan 2025 13:47:38 -0500 Subject: [PATCH 11/59] chore: Apply Clippy lint `single_match` --- stackslib/src/burnchains/bitcoin/indexer.rs | 21 +- stackslib/src/burnchains/tests/mod.rs | 9 +- stackslib/src/chainstate/stacks/block.rs | 57 ++-- .../src/chainstate/stacks/boot/pox_2_tests.rs | 29 +- .../src/chainstate/stacks/boot/pox_3_tests.rs | 15 +- .../src/chainstate/stacks/boot/pox_4_tests.rs | 15 +- stackslib/src/chainstate/stacks/db/blocks.rs | 30 +-- stackslib/src/chainstate/stacks/db/mod.rs | 21 +- .../src/chainstate/stacks/index/cache.rs | 14 +- stackslib/src/chainstate/stacks/index/file.rs | 14 +- stackslib/src/chainstate/stacks/index/marf.rs | 5 +- .../src/chainstate/stacks/index/proofs.rs | 12 +- .../src/chainstate/stacks/index/storage.rs | 5 +- .../src/chainstate/stacks/index/test/marf.rs | 16 +- stackslib/src/chainstate/stacks/miner.rs | 39 ++- stackslib/src/chainstate/stacks/mod.rs | 7 +- stackslib/src/chainstate/stacks/tests/mod.rs | 21 +- .../src/chainstate/stacks/transaction.rs | 48 ++-- stackslib/src/core/mempool.rs | 7 +- stackslib/src/net/atlas/mod.rs | 69 +++-- stackslib/src/net/chat.rs | 29 +- stackslib/src/net/connection.rs | 36 ++- stackslib/src/net/dns.rs | 33 +-- stackslib/src/net/download/epoch2x.rs | 28 +- stackslib/src/net/http/request.rs | 34 +-- stackslib/src/net/httpcore.rs | 29 +- stackslib/src/net/inv/epoch2x.rs | 41 ++- stackslib/src/net/mod.rs | 28 +- stackslib/src/net/neighbors/mod.rs | 7 +- stackslib/src/net/p2p.rs | 67 ++--- stackslib/src/net/prune.rs | 37 +-- stackslib/src/net/relay.rs | 79 +++--- stackslib/src/net/server.rs | 87 +++--- stackslib/src/net/tests/convergence.rs | 85 +++--- stackslib/src/net/tests/download/epoch2x.rs | 88 ++---- stackslib/src/net/tests/inv/epoch2x.rs | 198 ++++++-------- stackslib/src/net/tests/inv/nakamoto.rs | 22 +- stackslib/src/net/tests/neighbors.rs | 255 ++++++------------ stackslib/src/net/tests/relay/epoch2x.rs | 25 +- stackslib/src/net/unsolicited.rs | 23 +- stackslib/src/util_lib/mod.rs | 11 +- 41 files changed, 663 insertions(+), 1033 deletions(-) diff --git a/stackslib/src/burnchains/bitcoin/indexer.rs b/stackslib/src/burnchains/bitcoin/indexer.rs index 129a4b5a91..509eb61e79 100644 --- a/stackslib/src/burnchains/bitcoin/indexer.rs +++ b/stackslib/src/burnchains/bitcoin/indexer.rs @@ -282,11 +282,8 @@ impl BitcoinIndexer { btc_error::ConnectionError })?; - match self.runtime.sock.take() { - Some(s) => { - let _ = s.shutdown(Shutdown::Both); - } - None => {} + if let Some(s) = self.runtime.sock.take() { + let _ = s.shutdown(Shutdown::Both); } self.runtime.sock = Some(s); @@ -294,11 +291,8 @@ impl BitcoinIndexer { } Err(_e) => { let s = self.runtime.sock.take(); - match s { - Some(s) => { - let _ = s.shutdown(Shutdown::Both); - } - None => {} + if let Some(s) = s { + let _ = s.shutdown(Shutdown::Both); } Err(btc_error::ConnectionError) } @@ -932,11 +926,8 @@ impl BitcoinIndexer { impl Drop for BitcoinIndexer { fn drop(&mut self) { - match self.runtime.sock { - Some(ref mut s) => { - let _ = s.shutdown(Shutdown::Both); - } - None => {} + if let Some(ref mut s) = self.runtime.sock { + let _ = s.shutdown(Shutdown::Both); } } } diff --git a/stackslib/src/burnchains/tests/mod.rs b/stackslib/src/burnchains/tests/mod.rs index 23232ac3b4..91ae93bb3f 100644 --- a/stackslib/src/burnchains/tests/mod.rs +++ b/stackslib/src/burnchains/tests/mod.rs @@ 
-580,12 +580,9 @@ impl TestBurnchainBlock { assert_eq!(parent_snapshot.block_height + 1, self.block_height); for i in 0..self.txs.len() { - match self.txs[i] { - BlockstackOperationType::LeaderKeyRegister(ref mut data) => { - assert_eq!(data.block_height, self.block_height); - data.consensus_hash = parent_snapshot.consensus_hash.clone(); - } - _ => {} + if let BlockstackOperationType::LeaderKeyRegister(ref mut data) = self.txs[i] { + assert_eq!(data.block_height, self.block_height); + data.consensus_hash = parent_snapshot.consensus_hash.clone(); } } } diff --git a/stackslib/src/chainstate/stacks/block.rs b/stackslib/src/chainstate/stacks/block.rs index a335e21894..1c231b8efc 100644 --- a/stackslib/src/chainstate/stacks/block.rs +++ b/stackslib/src/chainstate/stacks/block.rs @@ -353,16 +353,13 @@ impl StacksMessageCodec for StacksBlock { // must be only one coinbase let mut coinbase_count = 0; for tx in txs.iter() { - match tx.payload { - TransactionPayload::Coinbase(..) => { - coinbase_count += 1; - if coinbase_count > 1 { - return Err(codec_error::DeserializeError( - "Invalid block: multiple coinbases found".to_string(), - )); - } + if let TransactionPayload::Coinbase(..) = tx.payload { + coinbase_count += 1; + if coinbase_count > 1 { + return Err(codec_error::DeserializeError( + "Invalid block: multiple coinbases found".to_string(), + )); } - _ => {} } } @@ -518,26 +515,23 @@ impl StacksBlock { let mut found_coinbase = false; let mut coinbase_index = 0; for (i, tx) in txs.iter().enumerate() { - match tx.payload { - TransactionPayload::Coinbase(..) => { - if !check_present { - warn!("Found unexpected coinbase tx {}", tx.txid()); - return false; - } - - if found_coinbase { - warn!("Found duplicate coinbase tx {}", tx.txid()); - return false; - } - - if tx.anchor_mode != TransactionAnchorMode::OnChainOnly { - warn!("Invalid coinbase tx {}: not on-chain only", tx.txid()); - return false; - } - found_coinbase = true; - coinbase_index = i; + if let TransactionPayload::Coinbase(..) = tx.payload { + if !check_present { + warn!("Found unexpected coinbase tx {}", tx.txid()); + return false; + } + + if found_coinbase { + warn!("Found duplicate coinbase tx {}", tx.txid()); + return false; } - _ => {} + + if tx.anchor_mode != TransactionAnchorMode::OnChainOnly { + warn!("Invalid coinbase tx {}: not on-chain only", tx.txid()); + return false; + } + found_coinbase = true; + coinbase_index = i; } } @@ -1150,11 +1144,8 @@ mod test { let mut txs_anchored = vec![]; for tx in all_txs.iter() { - match tx.payload { - TransactionPayload::Coinbase(..) => { - continue; - } - _ => {} + if let TransactionPayload::Coinbase(..) = tx.payload { + continue; } txs_anchored.push(tx); } diff --git a/stackslib/src/chainstate/stacks/boot/pox_2_tests.rs b/stackslib/src/chainstate/stacks/boot/pox_2_tests.rs index 3313e80c7f..4256fba3b9 100644 --- a/stackslib/src/chainstate/stacks/boot/pox_2_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/pox_2_tests.rs @@ -1371,23 +1371,20 @@ fn test_simple_pox_2_auto_unlock(alice_first: bool) { coinbase_txs.push(r); continue; } - match r.transaction { - TransactionOrigin::Stacks(ref t) => { - let addr = t.auth.origin().address_testnet(); - eprintln!("TX addr: {}", addr); - if addr == alice_address { - alice_txs.insert(t.auth.get_origin_nonce(), r); - } else if addr == bob_address { - bob_txs.insert(t.auth.get_origin_nonce(), r); - } else if addr == charlie_address { - assert!( - r.execution_cost != ExecutionCost::ZERO, - "Execution cost is not zero!" 
- ); - charlie_txs.insert(t.auth.get_origin_nonce(), r); - } + if let TransactionOrigin::Stacks(ref t) = r.transaction { + let addr = t.auth.origin().address_testnet(); + eprintln!("TX addr: {}", addr); + if addr == alice_address { + alice_txs.insert(t.auth.get_origin_nonce(), r); + } else if addr == bob_address { + bob_txs.insert(t.auth.get_origin_nonce(), r); + } else if addr == charlie_address { + assert!( + r.execution_cost != ExecutionCost::ZERO, + "Execution cost is not zero!" + ); + charlie_txs.insert(t.auth.get_origin_nonce(), r); } - _ => {} } } } diff --git a/stackslib/src/chainstate/stacks/boot/pox_3_tests.rs b/stackslib/src/chainstate/stacks/boot/pox_3_tests.rs index 5c52297969..d42095b923 100644 --- a/stackslib/src/chainstate/stacks/boot/pox_3_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/pox_3_tests.rs @@ -930,16 +930,13 @@ fn pox_auto_unlock(alice_first: bool) { coinbase_txs.push(r); continue; } - match r.transaction { - TransactionOrigin::Stacks(ref t) => { - let addr = t.auth.origin().address_testnet(); - if addr == alice_address { - alice_txs.insert(t.auth.get_origin_nonce(), r); - } else if addr == bob_address { - bob_txs.insert(t.auth.get_origin_nonce(), r); - } + if let TransactionOrigin::Stacks(ref t) = r.transaction { + let addr = t.auth.origin().address_testnet(); + if addr == alice_address { + alice_txs.insert(t.auth.get_origin_nonce(), r); + } else if addr == bob_address { + bob_txs.insert(t.auth.get_origin_nonce(), r); } - _ => {} } } } diff --git a/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs b/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs index 840e7a2c54..5005dd8781 100644 --- a/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs @@ -9184,16 +9184,13 @@ fn missed_slots_no_unlock() { coinbase_txs.push(r); continue; } - match r.transaction { - TransactionOrigin::Stacks(ref t) => { - let addr = t.auth.origin().address_testnet(); - if addr == alice_address { - alice_txs.insert(t.auth.get_origin_nonce(), r); - } else if addr == bob_address { - bob_txs.insert(t.auth.get_origin_nonce(), r); - } + if let TransactionOrigin::Stacks(ref t) = r.transaction { + let addr = t.auth.origin().address_testnet(); + if addr == alice_address { + alice_txs.insert(t.auth.get_origin_nonce(), r); + } else if addr == bob_address { + bob_txs.insert(t.auth.get_origin_nonce(), r); } - _ => {} } } } diff --git a/stackslib/src/chainstate/stacks/db/blocks.rs b/stackslib/src/chainstate/stacks/db/blocks.rs index d3a4dc8a5a..6c3c745a45 100644 --- a/stackslib/src/chainstate/stacks/db/blocks.rs +++ b/stackslib/src/chainstate/stacks/db/blocks.rs @@ -11206,15 +11206,12 @@ pub mod test { let (_, burn_header_hash, consensus_hash) = peer.next_burnchain_block(burn_ops.clone()); - match (stacks_block_opt, microblocks_opt) { - (Some(stacks_block), Some(microblocks)) => { - peer.process_stacks_epoch_at_tip(&stacks_block, µblocks); - last_block_id = StacksBlockHeader::make_index_block_hash( - &consensus_hash, - &stacks_block.block_hash(), - ); - } - _ => {} + if let (Some(stacks_block), Some(microblocks)) = (stacks_block_opt, microblocks_opt) { + peer.process_stacks_epoch_at_tip(&stacks_block, µblocks); + last_block_id = StacksBlockHeader::make_index_block_hash( + &consensus_hash, + &stacks_block.block_hash(), + ); } let tip = @@ -11889,15 +11886,12 @@ pub mod test { let (_, burn_header_hash, consensus_hash) = peer.next_burnchain_block(burn_ops.clone()); - match (stacks_block_opt, microblocks_opt) { - (Some(stacks_block), 
Some(microblocks)) => { - peer.process_stacks_epoch_at_tip(&stacks_block, µblocks); - last_block_id = StacksBlockHeader::make_index_block_hash( - &consensus_hash, - &stacks_block.block_hash(), - ); - } - _ => {} + if let (Some(stacks_block), Some(microblocks)) = (stacks_block_opt, microblocks_opt) { + peer.process_stacks_epoch_at_tip(&stacks_block, µblocks); + last_block_id = StacksBlockHeader::make_index_block_hash( + &consensus_hash, + &stacks_block.block_hash(), + ); } let tip = diff --git a/stackslib/src/chainstate/stacks/db/mod.rs b/stackslib/src/chainstate/stacks/db/mod.rs index c09b2fcbab..1d7c97b676 100644 --- a/stackslib/src/chainstate/stacks/db/mod.rs +++ b/stackslib/src/chainstate/stacks/db/mod.rs @@ -2746,11 +2746,8 @@ pub mod test { balances: Vec<(StacksAddress, u64)>, ) -> StacksChainState { let path = chainstate_path(test_name); - match fs::metadata(&path) { - Ok(_) => { - fs::remove_dir_all(&path).unwrap(); - } - Err(_) => {} + if let Ok(_) = fs::metadata(&path) { + fs::remove_dir_all(&path).unwrap(); }; let initial_balances = balances @@ -2866,11 +2863,8 @@ pub mod test { }; let path = chainstate_path(function_name!()); - match fs::metadata(&path) { - Ok(_) => { - fs::remove_dir_all(&path).unwrap(); - } - Err(_) => {} + if let Ok(_) = fs::metadata(&path) { + fs::remove_dir_all(&path).unwrap(); }; let mut chainstate = @@ -2956,11 +2950,8 @@ pub mod test { }; let path = chainstate_path(function_name!()); - match fs::metadata(&path) { - Ok(_) => { - fs::remove_dir_all(&path).unwrap(); - } - Err(_) => {} + if let Ok(_) = fs::metadata(&path) { + fs::remove_dir_all(&path).unwrap(); }; let mut chainstate = diff --git a/stackslib/src/chainstate/stacks/index/cache.rs b/stackslib/src/chainstate/stacks/index/cache.rs index d5ba5ae5f6..2d5cd556b8 100644 --- a/stackslib/src/chainstate/stacks/index/cache.rs +++ b/stackslib/src/chainstate/stacks/index/cache.rs @@ -258,12 +258,11 @@ impl TrieCache { TrieCache::Everything(ref mut state) => { state.store_node_and_hash(block_id, trieptr, node, hash); } - TrieCache::Node256(ref mut state) => match node { - TrieNodeType::Node256(data) => { + TrieCache::Node256(ref mut state) => { + if let TrieNodeType::Node256(data) = node { state.store_node_and_hash(block_id, trieptr, TrieNodeType::Node256(data), hash); } - _ => {} - }, + } } } @@ -273,12 +272,11 @@ impl TrieCache { match self { TrieCache::Noop(_) => {} TrieCache::Everything(ref mut state) => state.store_node(block_id, trieptr, node), - TrieCache::Node256(ref mut state) => match node { - TrieNodeType::Node256(data) => { + TrieCache::Node256(ref mut state) => { + if let TrieNodeType::Node256(data) = node { state.store_node(block_id, trieptr, TrieNodeType::Node256(data)) } - _ => {} - }, + } } } diff --git a/stackslib/src/chainstate/stacks/index/file.rs b/stackslib/src/chainstate/stacks/index/file.rs index 3940cb594e..52f571aa1f 100644 --- a/stackslib/src/chainstate/stacks/index/file.rs +++ b/stackslib/src/chainstate/stacks/index/file.rs @@ -194,11 +194,8 @@ impl TrieFile { .map(|stat| Some(stat.len())) .unwrap_or(None); - match (size_before_opt, size_after_opt) { - (Some(sz_before), Some(sz_after)) => { - debug!("Shrank DB from {} to {} bytes", sz_before, sz_after); - } - _ => {} + if let (Some(sz_before), Some(sz_after)) = (size_before_opt, size_after_opt) { + debug!("Shrank DB from {} to {} bytes", sz_before, sz_after); } Ok(()) @@ -461,11 +458,8 @@ impl TrieFile { self.write_all(buf)?; self.flush()?; - match self { - TrieFile::Disk(ref mut data) => { - data.fd.sync_data()?; - } - _ => {} + 
if let TrieFile::Disk(ref mut data) = self { + data.fd.sync_data()?; } Ok(offset) } diff --git a/stackslib/src/chainstate/stacks/index/marf.rs b/stackslib/src/chainstate/stacks/index/marf.rs index de1488d057..cfb5a97594 100644 --- a/stackslib/src/chainstate/stacks/index/marf.rs +++ b/stackslib/src/chainstate/stacks/index/marf.rs @@ -1291,9 +1291,8 @@ impl MARF { // used in testing in order to short-circuit block-height lookups // when the trie struct is tested outside of marf.rs usage if height == 0 { - match storage.test_genesis_block { - Some(ref s) => return Ok(Some(s.clone())), - _ => {} + if let Some(ref s) = storage.test_genesis_block { + return Ok(Some(s.clone())); } } } diff --git a/stackslib/src/chainstate/stacks/index/proofs.rs b/stackslib/src/chainstate/stacks/index/proofs.rs index 37ff420437..e7ba01a6bf 100644 --- a/stackslib/src/chainstate/stacks/index/proofs.rs +++ b/stackslib/src/chainstate/stacks/index/proofs.rs @@ -1213,12 +1213,12 @@ impl TrieMerkleProof { }; // next proof item should be part of a segment proof - match proof[i] { - TrieMerkleProofType::Shunt(_) => { - test_debug!("Malformed proof -- exepcted segment proof following first shunt proof head at {}", i); - return false; - } - _ => {} + if let TrieMerkleProofType::Shunt(_) = proof[i] { + test_debug!( + "Malformed proof -- exepcted segment proof following first shunt proof head at {}", + i + ); + return false; } while i < proof.len() { diff --git a/stackslib/src/chainstate/stacks/index/storage.rs b/stackslib/src/chainstate/stacks/index/storage.rs index 79b391ce42..fb9637c799 100644 --- a/stackslib/src/chainstate/stacks/index/storage.rs +++ b/stackslib/src/chainstate/stacks/index/storage.rs @@ -1887,9 +1887,8 @@ impl<'a, T: MarfTrieId> TrieStorageTransaction<'a, T> { // blow away db trie_sql::clear_tables(self.sqlite_tx())?; - match self.data.uncommitted_writes { - Some((_, ref mut trie_storage)) => trie_storage.format()?, - None => {} + if let Some((_, ref mut trie_storage)) = self.data.uncommitted_writes { + trie_storage.format()? 
}; self.data.set_block(T::sentinel(), None); diff --git a/stackslib/src/chainstate/stacks/index/test/marf.rs b/stackslib/src/chainstate/stacks/index/test/marf.rs index 4f2b06a480..7102527ba8 100644 --- a/stackslib/src/chainstate/stacks/index/test/marf.rs +++ b/stackslib/src/chainstate/stacks/index/test/marf.rs @@ -1282,11 +1282,8 @@ fn marf_insert_random_10485760_4096_file_storage() { } let path = "/tmp/rust_marf_insert_random_10485760_4096_file_storage".to_string(); - match fs::metadata(&path) { - Ok(_) => { - fs::remove_dir_all(&path).unwrap(); - } - Err(_) => {} + if let Ok(_) = fs::metadata(&path) { + fs::remove_dir_all(&path).unwrap(); }; let marf_opts = MARFOpenOpts::default(); let f = TrieFileStorage::open(&path, marf_opts).unwrap(); @@ -1567,12 +1564,9 @@ fn marf_read_random_1048576_4096_file_storage() { for marf_opts in MARFOpenOpts::all().into_iter() { test_debug!("With {:?}", &marf_opts); let path = "/tmp/rust_marf_insert_random_1048576_4096_file_storage".to_string(); - match fs::metadata(&path) { - Err(_) => { - eprintln!("Run the marf_insert_random_1048576_4096_file_storage test first"); - return; - } - Ok(_) => {} + if let Err(_) = fs::metadata(&path) { + eprintln!("Run the marf_insert_random_1048576_4096_file_storage test first"); + return; }; let marf_opts = MARFOpenOpts::default(); let mut f_store = TrieFileStorage::new_memory(marf_opts).unwrap(); diff --git a/stackslib/src/chainstate/stacks/miner.rs b/stackslib/src/chainstate/stacks/miner.rs index 43fd6b3c18..aca3f9d84c 100644 --- a/stackslib/src/chainstate/stacks/miner.rs +++ b/stackslib/src/chainstate/stacks/miner.rs @@ -1147,24 +1147,20 @@ impl<'a> StacksMicroblockBuilder<'a> { TransactionResult::Skipped(TransactionSkipped { error, .. }) | TransactionResult::ProcessingError(TransactionError { error, .. }) => { test_debug!("Exclude tx {} from microblock", tx.txid()); - match &error { - Error::BlockTooBigError => { - // done mining -- our execution budget is exceeded. - // Make the block from the transactions we did manage to get - test_debug!("Block budget exceeded on tx {}", &tx.txid()); - if block_limit_hit == BlockLimitFunction::NO_LIMIT_HIT { - test_debug!("Switch to mining stx-transfers only"); - block_limit_hit = BlockLimitFunction::CONTRACT_LIMIT_HIT; - } else if block_limit_hit - == BlockLimitFunction::CONTRACT_LIMIT_HIT - { - test_debug!( - "Stop mining microblock block due to limit exceeded" - ); - break; - } + if let Error::BlockTooBigError = &error { + // done mining -- our execution budget is exceeded. 
+ // Make the block from the transactions we did manage to get + test_debug!("Block budget exceeded on tx {}", &tx.txid()); + if block_limit_hit == BlockLimitFunction::NO_LIMIT_HIT { + test_debug!("Switch to mining stx-transfers only"); + block_limit_hit = BlockLimitFunction::CONTRACT_LIMIT_HIT; + } else if block_limit_hit == BlockLimitFunction::CONTRACT_LIMIT_HIT + { + test_debug!( + "Stop mining microblock block due to limit exceeded" + ); + break; } - _ => {} } continue; } @@ -1198,12 +1194,9 @@ impl<'a> StacksMicroblockBuilder<'a> { self.runtime.considered.replace(considered); self.runtime.num_mined = num_txs; - match result { - Err(e) => { - warn!("Error producing microblock: {}", e); - return Err(e); - } - _ => {} + if let Err(e) = result { + warn!("Error producing microblock: {}", e); + return Err(e); } return self.make_next_microblock(txs_included, miner_key, tx_events, None); diff --git a/stackslib/src/chainstate/stacks/mod.rs b/stackslib/src/chainstate/stacks/mod.rs index 23990fe199..546ab6bd08 100644 --- a/stackslib/src/chainstate/stacks/mod.rs +++ b/stackslib/src/chainstate/stacks/mod.rs @@ -1590,11 +1590,8 @@ pub mod test { } for tx in all_txs.into_iter() { - match tx.payload { - TransactionPayload::Coinbase(..) => { - continue; - } - _ => {} + if let TransactionPayload::Coinbase(..) = tx.payload { + continue; } txs_anchored.push(tx); if txs_anchored.len() >= num_txs { diff --git a/stackslib/src/chainstate/stacks/tests/mod.rs b/stackslib/src/chainstate/stacks/tests/mod.rs index 54dcea1c7e..d119dacd8e 100644 --- a/stackslib/src/chainstate/stacks/tests/mod.rs +++ b/stackslib/src/chainstate/stacks/tests/mod.rs @@ -338,11 +338,8 @@ impl TestStacksNode { panic!("Tried to fork an unforkable chainstate instance"); } - match fs::metadata(&chainstate_path(new_test_name)) { - Ok(_) => { - fs::remove_dir_all(&chainstate_path(new_test_name)).unwrap(); - } - Err(_) => {} + if let Ok(_) = fs::metadata(&chainstate_path(new_test_name)) { + fs::remove_dir_all(&chainstate_path(new_test_name)).unwrap(); } copy_dir( @@ -525,17 +522,14 @@ impl TestStacksNode { miner: &TestMiner, ) -> Option { for commit_op in miner.block_commits.iter().rev() { - match SortitionDB::get_block_snapshot_for_winning_stacks_block( + if let Some(sn) = SortitionDB::get_block_snapshot_for_winning_stacks_block( ic, &fork_tip.sortition_id, &commit_op.block_header_hash, ) .unwrap() { - Some(sn) => { - return Some(sn); - } - None => {} + return Some(sn); } } return None; @@ -1424,11 +1418,8 @@ pub fn instantiate_and_exec( post_flight_callback: Option>, ) -> StacksChainState { let path = chainstate_path(test_name); - match fs::metadata(&path) { - Ok(_) => { - fs::remove_dir_all(&path).unwrap(); - } - Err(_) => {} + if let Ok(_) = fs::metadata(&path) { + fs::remove_dir_all(&path).unwrap(); }; let initial_balances = balances diff --git a/stackslib/src/chainstate/stacks/transaction.rs b/stackslib/src/chainstate/stacks/transaction.rs index c0fa7f1727..0308a8124b 100644 --- a/stackslib/src/chainstate/stacks/transaction.rs +++ b/stackslib/src/chainstate/stacks/transaction.rs @@ -1130,17 +1130,14 @@ impl StacksTransactionSigner { } pub fn sign_sponsor(&mut self, privk: &StacksPrivateKey) -> Result<(), net_error> { - match self.tx.auth { - TransactionAuth::Sponsored(_, ref sponsor_condition) => { - if self.check_oversign - && sponsor_condition.num_signatures() >= sponsor_condition.signatures_required() - { - return Err(net_error::SigningError( - "Sponsor would have too many signatures".to_string(), - )); - } + if let 
TransactionAuth::Sponsored(_, ref sponsor_condition) = self.tx.auth { + if self.check_oversign + && sponsor_condition.num_signatures() >= sponsor_condition.signatures_required() + { + return Err(net_error::SigningError( + "Sponsor would have too many signatures".to_string(), + )); } - _ => {} } let next_sighash = self.tx.sign_next_sponsor(&self.sighash, privk)?; @@ -1933,24 +1930,21 @@ mod test { // test_debug!("mutate byte {}", &i); let mut cursor = io::Cursor::new(&tx_bytes); let mut reader = LogReader::from_reader(&mut cursor); - match StacksTransaction::consensus_deserialize(&mut reader) { - Ok(corrupt_tx) => { - let mut corrupt_tx_bytes = vec![]; - corrupt_tx - .consensus_serialize(&mut corrupt_tx_bytes) - .unwrap(); - if corrupt_tx_bytes.len() < tx_bytes.len() { - // didn't parse fully; the block-parsing logic would reject this block. - tx_bytes[i] = next_byte as u8; - continue; - } - if corrupt_tx.verify().is_ok() && corrupt_tx != *signed_tx { - eprintln!("corrupt tx: {:#?}", &corrupt_tx); - eprintln!("signed tx: {:#?}", &signed_tx); - assert!(false); - } + if let Ok(corrupt_tx) = StacksTransaction::consensus_deserialize(&mut reader) { + let mut corrupt_tx_bytes = vec![]; + corrupt_tx + .consensus_serialize(&mut corrupt_tx_bytes) + .unwrap(); + if corrupt_tx_bytes.len() < tx_bytes.len() { + // didn't parse fully; the block-parsing logic would reject this block. + tx_bytes[i] = next_byte as u8; + continue; + } + if corrupt_tx.verify().is_ok() && corrupt_tx != *signed_tx { + eprintln!("corrupt tx: {:#?}", &corrupt_tx); + eprintln!("signed tx: {:#?}", &signed_tx); + assert!(false); } - Err(_) => {} } // restore tx_bytes[i] = next_byte as u8; diff --git a/stackslib/src/core/mempool.rs b/stackslib/src/core/mempool.rs index 589b624abe..17848fa2d2 100644 --- a/stackslib/src/core/mempool.rs +++ b/stackslib/src/core/mempool.rs @@ -1092,11 +1092,8 @@ impl NonceCache { }; // In-memory cache - match self.cache.get_mut(&address) { - Some(nonce) => { - *nonce = value; - } - None => (), + if let Some(nonce) = self.cache.get_mut(&address) { + *nonce = value; } success diff --git a/stackslib/src/net/atlas/mod.rs b/stackslib/src/net/atlas/mod.rs index c382aa618d..49d1036a0b 100644 --- a/stackslib/src/net/atlas/mod.rs +++ b/stackslib/src/net/atlas/mod.rs @@ -195,45 +195,42 @@ impl AttachmentInstance { ) -> Option { if let Value::Tuple(ref attachment) = value { if let Ok(Value::Tuple(ref attachment_data)) = attachment.get("attachment") { - match ( + if let ( + Ok(Value::Sequence(SequenceData::Buffer(content_hash))), + Ok(Value::UInt(attachment_index)), + ) = ( attachment_data.get("hash"), attachment_data.get("attachment-index"), ) { - ( - Ok(Value::Sequence(SequenceData::Buffer(content_hash))), - Ok(Value::UInt(attachment_index)), - ) => { - let content_hash = if content_hash.data.is_empty() { - Hash160::empty() - } else { - match Hash160::from_bytes(&content_hash.data[..]) { - Some(content_hash) => content_hash, - _ => return None, - } - }; - let metadata = match attachment_data.get("metadata") { - Ok(metadata) => { - let mut serialized = vec![]; - metadata - .consensus_serialize(&mut serialized) - .expect("FATAL: invalid metadata"); - to_hex(&serialized[..]) - } - _ => String::new(), - }; - let instance = AttachmentInstance { - index_block_hash, - content_hash, - attachment_index: *attachment_index as u32, - stacks_block_height, - metadata, - contract_id: contract_id.clone(), - tx_id, - canonical_stacks_tip_height, - }; - return Some(instance); - } - _ => {} + let content_hash = if 
content_hash.data.is_empty() { + Hash160::empty() + } else { + match Hash160::from_bytes(&content_hash.data[..]) { + Some(content_hash) => content_hash, + _ => return None, + } + }; + let metadata = match attachment_data.get("metadata") { + Ok(metadata) => { + let mut serialized = vec![]; + metadata + .consensus_serialize(&mut serialized) + .expect("FATAL: invalid metadata"); + to_hex(&serialized[..]) + } + _ => String::new(), + }; + let instance = AttachmentInstance { + index_block_hash, + content_hash, + attachment_index: *attachment_index as u32, + stacks_block_height, + metadata, + contract_id: contract_id.clone(), + tx_id, + canonical_stacks_tip_height, + }; + return Some(instance); } } } diff --git a/stackslib/src/net/chat.rs b/stackslib/src/net/chat.rs index 2dea34245b..0ce27038cd 100644 --- a/stackslib/src/net/chat.rs +++ b/stackslib/src/net/chat.rs @@ -515,13 +515,12 @@ impl Neighbor { // setting BLOCKSTACK_NEIGHBOR_TEST_${PORTNUMBER} will let us select an organization // for this peer use std::env; - match env::var(format!("BLOCKSTACK_NEIGHBOR_TEST_{}", addr.port).to_string()) { - Ok(asn_str) => { - neighbor.asn = asn_str.parse().unwrap(); - neighbor.org = neighbor.asn; - test_debug!("Override {:?} to ASN/org {}", &neighbor.addr, neighbor.asn); - } - Err(_) => {} + if let Ok(asn_str) = + env::var(format!("BLOCKSTACK_NEIGHBOR_TEST_{}", addr.port).to_string()) + { + neighbor.asn = asn_str.parse().unwrap(); + neighbor.org = neighbor.asn; + test_debug!("Override {:?} to ASN/org {}", &neighbor.addr, neighbor.asn); }; } @@ -544,13 +543,10 @@ impl Neighbor { let asn_opt = PeerDB::asn_lookup(conn, &addr.addrbytes).map_err(net_error::DBError)?; - match asn_opt { - Some(a) => { - if a != 0 { - peer.asn = a; - } + if let Some(a) = asn_opt { + if a != 0 { + peer.asn = a; } - None => {} }; } Ok(Some(peer)) @@ -3110,11 +3106,8 @@ mod test { services: u16, ) -> (PeerDB, SortitionDB, StackerDBs, PoxId, StacksChainState) { let test_path = format!("/tmp/stacks-test-databases-{}", testname); - match fs::metadata(&test_path) { - Ok(_) => { - fs::remove_dir_all(&test_path).unwrap(); - } - Err(_) => {} + if let Ok(_) = fs::metadata(&test_path) { + fs::remove_dir_all(&test_path).unwrap(); }; fs::create_dir_all(&test_path).unwrap(); diff --git a/stackslib/src/net/connection.rs b/stackslib/src/net/connection.rs index 1d0eabdd14..0fe48a678b 100644 --- a/stackslib/src/net/connection.rs +++ b/stackslib/src/net/connection.rs @@ -926,19 +926,16 @@ impl ConnectionInbox

{ let bytes_consumed = if let Some(ref mut preamble) = preamble_opt { let (message_opt, bytes_consumed) = self.consume_payload(protocol, preamble, &buf[offset..])?; - match message_opt { - Some(message) => { - // queue up - test_debug!( - "Consumed message '{}' (request {}) in {} bytes", - message.get_message_name(), - message.request_id(), - bytes_consumed - ); - self.inbox.push_back(message); - consumed_message = true; - } - None => {} + if let Some(message) = message_opt { + // queue up + test_debug!( + "Consumed message '{}' (request {}) in {} bytes", + message.get_message_name(), + message.request_id(), + bytes_consumed + ); + self.inbox.push_back(message); + consumed_message = true; }; bytes_consumed @@ -982,14 +979,11 @@ impl ConnectionInbox

{ if let Some(ref mut preamble) = preamble_opt { let (message_opt, _bytes_consumed) = self.consume_payload(protocol, preamble, &[])?; - match message_opt { - Some(message) => { - // queue up - test_debug!("Consumed buffered message '{}' (request {}) from {} input buffer bytes", message.get_message_name(), message.request_id(), _bytes_consumed); - self.inbox.push_back(message); - consumed_message = true; - } - None => {} + if let Some(message) = message_opt { + // queue up + test_debug!("Consumed buffered message '{}' (request {}) from {} input buffer bytes", message.get_message_name(), message.request_id(), _bytes_consumed); + self.inbox.push_back(message); + consumed_message = true; } } self.preamble = preamble_opt; diff --git a/stackslib/src/net/dns.rs b/stackslib/src/net/dns.rs index 6529001d7d..77c401d5e2 100644 --- a/stackslib/src/net/dns.rs +++ b/stackslib/src/net/dns.rs @@ -377,13 +377,10 @@ mod test { let mut resolved_addrs = None; loop { client.try_recv().unwrap(); - match client.poll_lookup("www.google.com", 80).unwrap() { - Some(addrs) => { - test_debug!("addrs: {:?}", &addrs); - resolved_addrs = Some(addrs); - break; - } - None => {} + if let Some(addrs) = client.poll_lookup("www.google.com", 80).unwrap() { + test_debug!("addrs: {:?}", &addrs); + resolved_addrs = Some(addrs); + break; } sleep_ms(100); } @@ -423,13 +420,10 @@ mod test { if resolved_addrs.contains_key(&name.to_string()) { continue; } - match client.poll_lookup(name, 80).unwrap() { - Some(addrs) => { - test_debug!("name {} addrs: {:?}", name, &addrs); - resolved_addrs.insert(name.to_string(), addrs); - break; - } - None => {} + if let Some(addrs) = client.poll_lookup(name, 80).unwrap() { + test_debug!("name {} addrs: {:?}", name, &addrs); + resolved_addrs.insert(name.to_string(), addrs); + break; } } @@ -452,13 +446,10 @@ mod test { let mut resolved_error = None; loop { client.try_recv().unwrap(); - match client.poll_lookup("asdfjkl;", 80).unwrap() { - Some(resp) => { - test_debug!("addrs: {:?}", &resp); - resolved_error = Some(resp); - break; - } - None => {} + if let Some(resp) = client.poll_lookup("asdfjkl;", 80).unwrap() { + test_debug!("addrs: {:?}", &resp); + resolved_error = Some(resp); + break; } sleep_ms(100); } diff --git a/stackslib/src/net/download/epoch2x.rs b/stackslib/src/net/download/epoch2x.rs index 6d0bb63d5a..06f4e146fa 100644 --- a/stackslib/src/net/download/epoch2x.rs +++ b/stackslib/src/net/download/epoch2x.rs @@ -1045,9 +1045,8 @@ impl PeerNetwork { /// Pass a hint to the downloader to re-scan pub fn hint_download_rescan(&mut self, target_height: u64, ibd: bool) { - match self.block_downloader { - Some(ref mut dl) => dl.hint_download_rescan(target_height, ibd), - None => {} + if let Some(ref mut dl) = self.block_downloader { + dl.hint_download_rescan(target_height, ibd) } } @@ -1978,11 +1977,10 @@ impl PeerNetwork { for sortition_height in priority.into_iter() { match downloader.blocks_to_try.get_mut(&sortition_height) { Some(ref mut keys) => { - match PeerNetwork::begin_request(network, &downloader.dns_lookups, keys) { - Some((key, handle)) => { - requests.insert(key.clone(), handle); - } - None => {} + if let Some((key, handle)) = + PeerNetwork::begin_request(network, &downloader.dns_lookups, keys) + { + requests.insert(key.clone(), handle); } } None => { @@ -2016,11 +2014,10 @@ impl PeerNetwork { for sortition_height in priority.into_iter() { match downloader.microblocks_to_try.get_mut(&sortition_height) { Some(ref mut keys) => { - match PeerNetwork::begin_request(network, 
&downloader.dns_lookups, keys) { - Some((key, handle)) => { - requests.insert(key.clone(), handle); - } - None => {} + if let Some((key, handle)) = + PeerNetwork::begin_request(network, &downloader.dns_lookups, keys) + { + requests.insert(key.clone(), handle); } } None => { @@ -2480,9 +2477,8 @@ impl PeerNetwork { if done { // reset state if we're done - match self.block_downloader { - Some(ref mut downloader) => downloader.reset(), - None => {} + if let Some(ref mut downloader) = self.block_downloader { + downloader.reset() } } diff --git a/stackslib/src/net/http/request.rs b/stackslib/src/net/http/request.rs index e2d0fd16f3..13daa56cab 100644 --- a/stackslib/src/net/http/request.rs +++ b/stackslib/src/net/http/request.rs @@ -273,29 +273,23 @@ impl StacksMessageCodec for HttpRequestPreamble { .map_err(CodecError::WriteError)?; // content-type - match self.content_type { - Some(ref c) => { - fd.write_all("Content-Type: ".as_bytes()) - .map_err(CodecError::WriteError)?; - fd.write_all(c.to_string().as_str().as_bytes()) - .map_err(CodecError::WriteError)?; - fd.write_all("\r\n".as_bytes()) - .map_err(CodecError::WriteError)?; - } - None => {} + if let Some(ref c) = self.content_type { + fd.write_all("Content-Type: ".as_bytes()) + .map_err(CodecError::WriteError)?; + fd.write_all(c.to_string().as_str().as_bytes()) + .map_err(CodecError::WriteError)?; + fd.write_all("\r\n".as_bytes()) + .map_err(CodecError::WriteError)?; } // content-length - match self.content_length { - Some(l) => { - fd.write_all("Content-Length: ".as_bytes()) - .map_err(CodecError::WriteError)?; - fd.write_all(format!("{}", l).as_bytes()) - .map_err(CodecError::WriteError)?; - fd.write_all("\r\n".as_bytes()) - .map_err(CodecError::WriteError)?; - } - None => {} + if let Some(l) = self.content_length { + fd.write_all("Content-Length: ".as_bytes()) + .map_err(CodecError::WriteError)?; + fd.write_all(format!("{}", l).as_bytes()) + .map_err(CodecError::WriteError)?; + fd.write_all("\r\n".as_bytes()) + .map_err(CodecError::WriteError)?; } // keep-alive diff --git a/stackslib/src/net/httpcore.rs b/stackslib/src/net/httpcore.rs index a38f35c005..919d7ffa1c 100644 --- a/stackslib/src/net/httpcore.rs +++ b/stackslib/src/net/httpcore.rs @@ -1232,25 +1232,22 @@ impl StacksHttp { /// This method will set up this state machine to consume the message associated with this /// premable, if the response is chunked. 
fn set_preamble(&mut self, preamble: &StacksHttpPreamble) -> Result<(), NetError> { - match preamble { - StacksHttpPreamble::Response(ref http_response_preamble) => { - // we can only receive a response if we're expecting it - if self.request_handler_index.is_none() && !self.allow_arbitrary_response { - return Err(NetError::DeserializeError( - "Unexpected HTTP response: no active request handler".to_string(), - )); + if let StacksHttpPreamble::Response(ref http_response_preamble) = preamble { + // we can only receive a response if we're expecting it + if self.request_handler_index.is_none() && !self.allow_arbitrary_response { + return Err(NetError::DeserializeError( + "Unexpected HTTP response: no active request handler".to_string(), + )); + } + if http_response_preamble.is_chunked() { + // we can only receive one response at a time + if self.reply.is_some() { + test_debug!("Have pending reply already"); + return Err(NetError::InProgress); } - if http_response_preamble.is_chunked() { - // we can only receive one response at a time - if self.reply.is_some() { - test_debug!("Have pending reply already"); - return Err(NetError::InProgress); - } - self.set_pending(http_response_preamble); - } + self.set_pending(http_response_preamble); } - _ => {} } Ok(()) } diff --git a/stackslib/src/net/inv/epoch2x.rs b/stackslib/src/net/inv/epoch2x.rs index 9b9e7b3682..449d2e26e7 100644 --- a/stackslib/src/net/inv/epoch2x.rs +++ b/stackslib/src/net/inv/epoch2x.rs @@ -1534,15 +1534,12 @@ impl PeerNetwork { } // does the peer agree with our PoX view up to this reward cycle? - match stats.inv.pox_inv_cmp(&self.pox_id) { - Some((disagreed, _, _)) => { - if disagreed < target_block_reward_cycle { - // can't proceed - debug!("{:?}: remote neighbor {:?} disagrees with our PoX inventory at reward cycle {} (asked for {})", &self.local_peer, nk, disagreed, target_block_reward_cycle); - return Ok(0); - } + if let Some((disagreed, _, _)) = stats.inv.pox_inv_cmp(&self.pox_id) { + if disagreed < target_block_reward_cycle { + // can't proceed + debug!("{:?}: remote neighbor {:?} disagrees with our PoX inventory at reward cycle {} (asked for {})", &self.local_peer, nk, disagreed, target_block_reward_cycle); + return Ok(0); } - None => {} } let target_block_height = self @@ -2523,13 +2520,10 @@ impl PeerNetwork { let mut cur_neighbors = HashSet::new(); for (nk, event_id) in self.events.iter() { // only outbound authenticated peers - match self.peers.get(event_id) { - Some(convo) => { - if convo.is_outbound() && convo.is_authenticated() { - cur_neighbors.insert(nk.clone()); - } + if let Some(convo) = self.peers.get(event_id) { + if convo.is_outbound() && convo.is_authenticated() { + cur_neighbors.insert(nk.clone()); } - None => {} } } @@ -2543,17 +2537,14 @@ impl PeerNetwork { /// Set a hint that we learned something new, and need to sync invs again pub fn hint_sync_invs(&mut self, target_height: u64) { - match self.inv_state { - Some(ref mut inv_state) => { - debug!( - "Awaken inv sync to re-scan peer block inventories at height {}", - target_height - ); - inv_state.hint_learned_data = true; - inv_state.hint_do_rescan = true; - inv_state.hint_learned_data_height = target_height; - } - None => {} + if let Some(ref mut inv_state) = self.inv_state { + debug!( + "Awaken inv sync to re-scan peer block inventories at height {}", + target_height + ); + inv_state.hint_learned_data = true; + inv_state.hint_do_rescan = true; + inv_state.hint_learned_data_height = target_height; } } diff --git a/stackslib/src/net/mod.rs 
b/stackslib/src/net/mod.rs index cfefa2c5fe..a2461631a6 100644 --- a/stackslib/src/net/mod.rs +++ b/stackslib/src/net/mod.rs @@ -2376,11 +2376,8 @@ pub mod test { if self.closed { return Ok(0); } - match self.read_error { - Some(ref e) => { - return Err(io::Error::from((*e).clone())); - } - None => {} + if let Some(ref e) = self.read_error { + return Err(io::Error::from((*e).clone())); } let sz = self.c.read(buf)?; @@ -2403,11 +2400,8 @@ pub mod test { if self.closed { return Err(io::Error::from(ErrorKind::Other)); // EBADF } - match self.write_error { - Some(ref e) => { - return Err(io::Error::from((*e).clone())); - } - None => {} + if let Some(ref e) = self.write_error { + return Err(io::Error::from((*e).clone())); } self.c.write(buf) } @@ -2799,11 +2793,8 @@ pub mod test { pub fn make_test_path(config: &TestPeerConfig) -> String { let test_path = TestPeer::test_path(&config); - match fs::metadata(&test_path) { - Ok(_) => { - fs::remove_dir_all(&test_path).unwrap(); - } - Err(_) => {} + if let Ok(_) = fs::metadata(&test_path) { + fs::remove_dir_all(&test_path).unwrap(); }; fs::create_dir_all(&test_path).unwrap(); @@ -3559,11 +3550,8 @@ pub mod test { ch: &ConsensusHash, ) { for op in blockstack_ops.iter_mut() { - match op { - BlockstackOperationType::LeaderKeyRegister(ref mut data) => { - data.consensus_hash = (*ch).clone(); - } - _ => {} + if let BlockstackOperationType::LeaderKeyRegister(ref mut data) = op { + data.consensus_hash = (*ch).clone(); } } } diff --git a/stackslib/src/net/neighbors/mod.rs b/stackslib/src/net/neighbors/mod.rs index cc3fd73db8..f0d3cf18b7 100644 --- a/stackslib/src/net/neighbors/mod.rs +++ b/stackslib/src/net/neighbors/mod.rs @@ -388,11 +388,8 @@ impl PeerNetwork { inbound.join(", ") ); - match PeerDB::get_frontier_size(self.peerdb.conn()) { - Ok(count) => { - debug!("{:?}: Frontier table size: {}", &self.local_peer, count); - } - Err(_) => {} + if let Ok(count) = PeerDB::get_frontier_size(self.peerdb.conn()) { + debug!("{:?}: Frontier table size: {}", &self.local_peer, count); }; debug!("{:?}: Walk finished ===================", &self.local_peer); } diff --git a/stackslib/src/net/p2p.rs b/stackslib/src/net/p2p.rs index 78c8982106..3180c3a3dd 100644 --- a/stackslib/src/net/p2p.rs +++ b/stackslib/src/net/p2p.rs @@ -1145,13 +1145,10 @@ impl PeerNetwork { ) -> u64 { let mut ret = 0; for (_, socket) in sockets.iter() { - match socket.peer_addr() { - Ok(addr) => { - if addr.ip() == ipaddr.ip() { - ret += 1; - } + if let Ok(addr) = socket.peer_addr() { + if addr.ip() == ipaddr.ip() { + ret += 1; } - Err(_) => {} }; } ret @@ -1378,12 +1375,9 @@ impl PeerNetwork { NetworkRequest::Ban(neighbor_keys) => { for neighbor_key in neighbor_keys.iter() { info!("Request to ban {:?}", neighbor_key); - match self.events.get(neighbor_key) { - Some(event_id) => { - debug!("Will ban {:?} (event {})", neighbor_key, event_id); - self.bans.insert(*event_id); - } - None => {} + if let Some(event_id) = self.events.get(neighbor_key) { + debug!("Will ban {:?} (event {})", neighbor_key, event_id); + self.bans.insert(*event_id); } } Ok(()) @@ -1466,28 +1460,25 @@ impl PeerNetwork { // receive all in-bound requests for i in 0..self.handles.len() { - match self.handles.get(i) { - Some(ref handle) => { - loop { - // drain all inbound requests - let inbound_request_res = handle.chan_in.try_recv(); - match inbound_request_res { - Ok(inbound_request) => { - messages.push((i, inbound_request)); - } - Err(TryRecvError::Empty) => { - // nothing to do - break; - } - Err(TryRecvError::Disconnected) => 
{ - // dead; remove - to_remove.push(i); - break; - } + if let Some(ref handle) = self.handles.get(i) { + loop { + // drain all inbound requests + let inbound_request_res = handle.chan_in.try_recv(); + match inbound_request_res { + Ok(inbound_request) => { + messages.push((i, inbound_request)); + } + Err(TryRecvError::Empty) => { + // nothing to do + break; + } + Err(TryRecvError::Disconnected) => { + // dead; remove + to_remove.push(i); + break; } } } - None => {} } } @@ -1885,11 +1876,8 @@ impl PeerNetwork { /// Deregister a socket from our p2p network instance. fn deregister_socket(&mut self, event_id: usize, socket: mio_net::TcpStream) { - match self.network { - Some(ref mut network) => { - let _ = network.deregister(event_id, &socket); - } - None => {} + if let Some(ref mut network) = self.network { + let _ = network.deregister(event_id, &socket); } } @@ -1969,11 +1957,8 @@ impl PeerNetwork { /// Deregister and ban a neighbor pub fn deregister_and_ban_neighbor(&mut self, neighbor: &NeighborKey) { debug!("Disconnect from and ban {:?}", neighbor); - match self.events.get(neighbor) { - Some(event_id) => { - self.bans.insert(*event_id); - } - None => {} + if let Some(event_id) = self.events.get(neighbor) { + self.bans.insert(*event_id); } self.relayer_stats.process_neighbor_ban(neighbor); diff --git a/stackslib/src/net/prune.rs b/stackslib/src/net/prune.rs index f178ea719a..c58e1b210a 100644 --- a/stackslib/src/net/prune.rs +++ b/stackslib/src/net/prune.rs @@ -322,18 +322,15 @@ impl PeerNetwork { if preserve.contains(event_id) { continue; } - match self.peers.get(&event_id) { - Some(ref convo) => { - if !convo.stats.outbound { - let stats = convo.stats.clone(); - if let Some(entry) = ip_neighbor.get_mut(&nk.addrbytes) { - entry.push((*event_id, nk.clone(), stats)); - } else { - ip_neighbor.insert(nk.addrbytes, vec![(*event_id, nk.clone(), stats)]); - } + if let Some(ref convo) = self.peers.get(&event_id) { + if !convo.stats.outbound { + let stats = convo.stats.clone(); + if let Some(entry) = ip_neighbor.get_mut(&nk.addrbytes) { + entry.push((*event_id, nk.clone(), stats)); + } else { + ip_neighbor.insert(nk.addrbytes, vec![(*event_id, nk.clone(), stats)]); } } - None => {} } } @@ -378,15 +375,12 @@ impl PeerNetwork { let mut outbound: Vec = vec![]; for (nk, event_id) in self.events.iter() { - match self.peers.get(event_id) { - Some(convo) => { - if convo.stats.outbound { - outbound.push(format!("{:?}", &nk)); - } else { - inbound.push(format!("{:?}", &nk)); - } + if let Some(convo) = self.peers.get(event_id) { + if convo.stats.outbound { + outbound.push(format!("{:?}", &nk)); + } else { + inbound.push(format!("{:?}", &nk)); } - None => {} } } (inbound, outbound) @@ -464,11 +458,8 @@ impl PeerNetwork { inbound.join(", ") ); - match PeerDB::get_frontier_size(self.peerdb.conn()) { - Ok(count) => { - debug!("{:?}: Frontier size: {}", &self.local_peer, count); - } - Err(_) => {} + if let Ok(count) = PeerDB::get_frontier_size(self.peerdb.conn()) { + debug!("{:?}: Frontier size: {}", &self.local_peer, count); }; } } diff --git a/stackslib/src/net/relay.rs b/stackslib/src/net/relay.rs index 4569585b79..cadfb75f1e 100644 --- a/stackslib/src/net/relay.rs +++ b/stackslib/src/net/relay.rs @@ -1826,52 +1826,49 @@ impl Relayer { &tx.txid(), &ast_rules ); - match tx.payload { - TransactionPayload::SmartContract(ref smart_contract, ref clarity_version_opt) => { - let clarity_version = - clarity_version_opt.unwrap_or(ClarityVersion::default_for_epoch(epoch_id)); - - if ast_rules == ASTRules::PrecheckSize 
{ - let origin = tx.get_origin(); - let issuer_principal = { - let addr = if mainnet { - origin.address_mainnet() - } else { - origin.address_testnet() - }; - addr.to_account_principal() - }; - let issuer_principal = if let PrincipalData::Standard(data) = issuer_principal { - data + if let TransactionPayload::SmartContract(ref smart_contract, ref clarity_version_opt) = + tx.payload + { + let clarity_version = + clarity_version_opt.unwrap_or(ClarityVersion::default_for_epoch(epoch_id)); + + if ast_rules == ASTRules::PrecheckSize { + let origin = tx.get_origin(); + let issuer_principal = { + let addr = if mainnet { + origin.address_mainnet() } else { - // not possible - panic!("Transaction had a contract principal origin"); + origin.address_testnet() }; + addr.to_account_principal() + }; + let issuer_principal = if let PrincipalData::Standard(data) = issuer_principal { + data + } else { + // not possible + panic!("Transaction had a contract principal origin"); + }; - let contract_id = QualifiedContractIdentifier::new( - issuer_principal, - smart_contract.name.clone(), - ); - let contract_code_str = smart_contract.code_body.to_string(); - - // make sure that the AST isn't unreasonably big - let ast_res = - ast_check_size(&contract_id, &contract_code_str, clarity_version, epoch_id); - match ast_res { - Ok(_) => {} - Err(parse_error) => match parse_error.err { - ParseErrors::ExpressionStackDepthTooDeep - | ParseErrors::VaryExpressionStackDepthTooDeep => { - // don't include this block - info!("Transaction {} is problematic and will not be included, relayed, or built upon", &tx.txid()); - return Err(Error::ClarityError(parse_error.into())); - } - _ => {} - }, - } + let contract_id = + QualifiedContractIdentifier::new(issuer_principal, smart_contract.name.clone()); + let contract_code_str = smart_contract.code_body.to_string(); + + // make sure that the AST isn't unreasonably big + let ast_res = + ast_check_size(&contract_id, &contract_code_str, clarity_version, epoch_id); + match ast_res { + Ok(_) => {} + Err(parse_error) => match parse_error.err { + ParseErrors::ExpressionStackDepthTooDeep + | ParseErrors::VaryExpressionStackDepthTooDeep => { + // don't include this block + info!("Transaction {} is problematic and will not be included, relayed, or built upon", &tx.txid()); + return Err(Error::ClarityError(parse_error.into())); + } + _ => {} + }, } } - _ => {} } Ok(()) } diff --git a/stackslib/src/net/server.rs b/stackslib/src/net/server.rs index 78f0f6fbb5..2459f64c00 100644 --- a/stackslib/src/net/server.rs +++ b/stackslib/src/net/server.rs @@ -429,56 +429,52 @@ impl HttpPeer { // get incoming bytes and update the state of this conversation. let mut convo_dead = false; let recv_res = convo.recv(client_sock); - match recv_res { - Err(e) => { - match e { - net_error::PermanentlyDrained => { - // socket got closed, but we might still have pending unsolicited messages - debug!( - "Remote HTTP peer disconnected event {} (socket {:?})", - event_id, &client_sock - ); - convo_dead = true; - } - net_error::InvalidMessage => { - // got sent bad data. If this was an inbound conversation, send it a HTTP - // 400 and close the socket. 
- debug!("Got a bad HTTP message on socket {:?}", &client_sock); - match convo.reply_error(StacksHttpResponse::new_empty_error( - &HttpBadRequest::new( - "Received an HTTP message that the node could not decode" - .to_string(), - ), - )) { - Ok(_) => { - // prime the socket - if let Err(e) = HttpPeer::saturate_http_socket(client_sock, convo) { - debug!( - "Failed to flush HTTP 400 to socket {:?}: {:?}", - &client_sock, &e - ); - // convo_dead = true; - } - } - Err(e) => { + if let Err(e) = recv_res { + match e { + net_error::PermanentlyDrained => { + // socket got closed, but we might still have pending unsolicited messages + debug!( + "Remote HTTP peer disconnected event {} (socket {:?})", + event_id, &client_sock + ); + convo_dead = true; + } + net_error::InvalidMessage => { + // got sent bad data. If this was an inbound conversation, send it a HTTP + // 400 and close the socket. + debug!("Got a bad HTTP message on socket {:?}", &client_sock); + match convo.reply_error(StacksHttpResponse::new_empty_error( + &HttpBadRequest::new( + "Received an HTTP message that the node could not decode".to_string(), + ), + )) { + Ok(_) => { + // prime the socket + if let Err(e) = HttpPeer::saturate_http_socket(client_sock, convo) { debug!( - "Failed to reply HTTP 400 to socket {:?}: {:?}", + "Failed to flush HTTP 400 to socket {:?}: {:?}", &client_sock, &e ); - convo_dead = true; + // convo_dead = true; } } + Err(e) => { + debug!( + "Failed to reply HTTP 400 to socket {:?}: {:?}", + &client_sock, &e + ); + convo_dead = true; + } } - _ => { - debug!( - "Failed to receive HTTP data on event {} (socket {:?}): {:?}", - event_id, &client_sock, &e - ); - convo_dead = true; - } + } + _ => { + debug!( + "Failed to receive HTTP data on event {} (socket {:?}): {:?}", + event_id, &client_sock, &e + ); + convo_dead = true; } } - Ok(_) => {} } // react to inbound messages -- do we need to send something out, or fulfill requests @@ -730,11 +726,8 @@ mod test { peer.step().unwrap(); // asked to yield? 
- match http_rx.try_recv() { - Ok(_) => { - break; - } - Err(_) => {} + if let Ok(_) = http_rx.try_recv() { + break; } } diff --git a/stackslib/src/net/tests/convergence.rs b/stackslib/src/net/tests/convergence.rs index be35c4e1f1..a607298d74 100644 --- a/stackslib/src/net/tests/convergence.rs +++ b/stackslib/src/net/tests/convergence.rs @@ -218,7 +218,7 @@ fn test_walk_ring_15_org_biased() { let peers = test_walk_ring(&mut peer_configs); for i in 1..peer_count { - match PeerDB::get_peer( + if let Some(p) = PeerDB::get_peer( peers[i].network.peerdb.conn(), peer_0.addr.network_id, &peer_0.addr.addrbytes, @@ -226,11 +226,8 @@ fn test_walk_ring_15_org_biased() { ) .unwrap() { - Some(p) => { - assert_eq!(p.asn, 1); - assert_eq!(p.org, 1); - } - None => {} + assert_eq!(p.asn, 1); + assert_eq!(p.org, 1); } } @@ -398,7 +395,7 @@ fn test_walk_line_15_org_biased() { let peers = test_walk_line(&mut peer_configs); for i in 1..peer_count { - match PeerDB::get_peer( + if let Some(p) = PeerDB::get_peer( peers[i].network.peerdb.conn(), peer_0.addr.network_id, &peer_0.addr.addrbytes, @@ -406,11 +403,8 @@ fn test_walk_line_15_org_biased() { ) .unwrap() { - Some(p) => { - assert_eq!(p.asn, 1); - assert_eq!(p.org, 1); - } - None => {} + assert_eq!(p.asn, 1); + assert_eq!(p.org, 1); } } @@ -634,7 +628,7 @@ fn test_walk_star_15_org_biased() { let peers = test_walk_star(&mut peer_configs); for i in 1..peer_count { - match PeerDB::get_peer( + if let Some(p) = PeerDB::get_peer( peers[i].network.peerdb.conn(), peer_0.addr.network_id, &peer_0.addr.addrbytes, @@ -642,11 +636,8 @@ fn test_walk_star_15_org_biased() { ) .unwrap() { - Some(p) => { - assert_eq!(p.asn, 1); - assert_eq!(p.org, 1); - } - None => {} + assert_eq!(p.asn, 1); + assert_eq!(p.org, 1); } } @@ -849,14 +840,11 @@ fn dump_peers(peers: &[TestPeer]) { let stats_opt = peers[i] .network .get_neighbor_stats(&peers[j].to_neighbor().addr); - match stats_opt { - Some(stats) => { - neighbor_index.push(j); - if stats.outbound { - outbound_neighbor_index.push(j); - } + if let Some(stats) = stats_opt { + neighbor_index.push(j); + if stats.outbound { + outbound_neighbor_index.push(j); } - None => {} } } @@ -882,16 +870,13 @@ fn dump_peer_histograms(peers: &[TestPeer]) { let stats_opt = peers[i] .network .get_neighbor_stats(&peers[j].to_neighbor().addr); - match stats_opt { - Some(stats) => { - neighbor_index.push(j); - if stats.outbound { - outbound_neighbor_index.push(j); - } else { - inbound_neighbor_index.push(j); - } + if let Some(stats) = stats_opt { + neighbor_index.push(j); + if stats.outbound { + outbound_neighbor_index.push(j); + } else { + inbound_neighbor_index.push(j); } - None => {} } } for inbound in inbound_neighbor_index.iter() { @@ -1001,32 +986,26 @@ fn run_topology_test_ex( debug!("Step peer {:?}", &nk); // allowed peers are still connected - match initial_allowed.get(&nk) { - Some(ref peer_list) => { - for pnk in peer_list.iter() { - if !peers[i].network.events.contains_key(&pnk.clone()) { - error!( - "{:?}: Perma-allowed peer {:?} not connected anymore", - &nk, &pnk - ); - assert!(false); - } + if let Some(ref peer_list) = initial_allowed.get(&nk) { + for pnk in peer_list.iter() { + if !peers[i].network.events.contains_key(&pnk.clone()) { + error!( + "{:?}: Perma-allowed peer {:?} not connected anymore", + &nk, &pnk + ); + assert!(false); } } - None => {} }; // denied peers are never connected - match initial_denied.get(&nk) { - Some(ref peer_list) => { - for pnk in peer_list.iter() { - if peers[i].network.events.contains_key(&pnk.clone()) { 
- error!("{:?}: Perma-denied peer {:?} connected", &nk, &pnk); - assert!(false); - } + if let Some(ref peer_list) = initial_denied.get(&nk) { + for pnk in peer_list.iter() { + if peers[i].network.events.contains_key(&pnk.clone()) { + error!("{:?}: Perma-denied peer {:?} connected", &nk, &pnk); + assert!(false); } } - None => {} }; // all ports are unique in the p2p socket table diff --git a/stackslib/src/net/tests/download/epoch2x.rs b/stackslib/src/net/tests/download/epoch2x.rs index 5c13a12a50..e21ce19c35 100644 --- a/stackslib/src/net/tests/download/epoch2x.rs +++ b/stackslib/src/net/tests/download/epoch2x.rs @@ -171,20 +171,14 @@ fn test_get_block_availability() { }; // nothing should break - match peer_1.network.inv_state { - Some(ref inv) => { - assert_eq!(inv.get_broken_peers().len(), 0); - assert_eq!(inv.get_diverged_peers().len(), 0); - } - None => {} + if let Some(ref inv) = peer_1.network.inv_state { + assert_eq!(inv.get_broken_peers().len(), 0); + assert_eq!(inv.get_diverged_peers().len(), 0); } - match peer_2.network.inv_state { - Some(ref inv) => { - assert_eq!(inv.get_broken_peers().len(), 0); - assert_eq!(inv.get_diverged_peers().len(), 0); - } - None => {} + if let Some(ref inv) = peer_2.network.inv_state { + assert_eq!(inv.get_broken_peers().len(), 0); + assert_eq!(inv.get_diverged_peers().len(), 0); } round += 1; @@ -566,12 +560,9 @@ pub fn test_get_blocks_and_microblocks_2_peers_download_plain() { |peer| { // check peer health // nothing should break - match peer.network.block_downloader { - Some(ref dl) => { - assert_eq!(dl.broken_peers.len(), 0); - assert_eq!(dl.dead_peers.len(), 0); - } - None => {} + if let Some(ref dl) = peer.network.block_downloader { + assert_eq!(dl.broken_peers.len(), 0); + assert_eq!(dl.dead_peers.len(), 0); } // no block advertisements (should be disabled) @@ -847,12 +838,9 @@ pub fn test_get_blocks_and_microblocks_2_peers_download_plain_100_blocks() { |peer| { // check peer health // nothing should break - match peer.network.block_downloader { - Some(ref dl) => { - assert_eq!(dl.broken_peers.len(), 0); - assert_eq!(dl.dead_peers.len(), 0); - } - None => {} + if let Some(ref dl) = peer.network.block_downloader { + assert_eq!(dl.broken_peers.len(), 0); + assert_eq!(dl.dead_peers.len(), 0); } // no block advertisements (should be disabled) @@ -938,12 +926,9 @@ pub fn test_get_blocks_and_microblocks_5_peers_star() { |peer| { // check peer health // nothing should break - match peer.network.block_downloader { - Some(ref dl) => { - assert_eq!(dl.broken_peers.len(), 0); - assert_eq!(dl.dead_peers.len(), 0); - } - None => {} + if let Some(ref dl) = peer.network.block_downloader { + assert_eq!(dl.broken_peers.len(), 0); + assert_eq!(dl.dead_peers.len(), 0); } true }, @@ -1012,12 +997,9 @@ pub fn test_get_blocks_and_microblocks_5_peers_line() { |peer| { // check peer health // nothing should break - match peer.network.block_downloader { - Some(ref dl) => { - assert_eq!(dl.broken_peers.len(), 0); - assert_eq!(dl.dead_peers.len(), 0); - } - None => {} + if let Some(ref dl) = peer.network.block_downloader { + assert_eq!(dl.broken_peers.len(), 0); + assert_eq!(dl.dead_peers.len(), 0); } true }, @@ -1094,12 +1076,9 @@ pub fn test_get_blocks_and_microblocks_overwhelmed_connections() { |peer| { // check peer health // nothing should break - match peer.network.block_downloader { - Some(ref dl) => { - assert_eq!(dl.broken_peers.len(), 0); - assert_eq!(dl.dead_peers.len(), 0); - } - None => {} + if let Some(ref dl) = peer.network.block_downloader { + 
assert_eq!(dl.broken_peers.len(), 0); + assert_eq!(dl.dead_peers.len(), 0); } true }, @@ -1173,12 +1152,9 @@ pub fn test_get_blocks_and_microblocks_overwhelmed_sockets() { |peer| { // check peer health // nothing should break - match peer.network.block_downloader { - Some(ref dl) => { - assert_eq!(dl.broken_peers.len(), 0); - assert_eq!(dl.dead_peers.len(), 0); - } - None => {} + if let Some(ref dl) = peer.network.block_downloader { + assert_eq!(dl.broken_peers.len(), 0); + assert_eq!(dl.dead_peers.len(), 0); } true }, @@ -1260,11 +1236,8 @@ pub fn test_get_blocks_and_microblocks_ban_url() { |_| {}, |peer| { let mut blocked = 0; - match peer.network.block_downloader { - Some(ref dl) => { - blocked = dl.blocked_urls.len(); - } - None => {} + if let Some(ref dl) = peer.network.block_downloader { + blocked = dl.blocked_urls.len(); } if blocked >= 1 { // NOTE: this is the success criterion @@ -1481,12 +1454,9 @@ pub fn test_get_blocks_and_microblocks_2_peers_download_multiple_microblock_desc |peer| { // check peer health // nothing should break - match peer.network.block_downloader { - Some(ref dl) => { - assert_eq!(dl.broken_peers.len(), 0); - assert_eq!(dl.dead_peers.len(), 0); - } - None => {} + if let Some(ref dl) = peer.network.block_downloader { + assert_eq!(dl.broken_peers.len(), 0); + assert_eq!(dl.dead_peers.len(), 0); } // no block advertisements (should be disabled) diff --git a/stackslib/src/net/tests/inv/epoch2x.rs b/stackslib/src/net/tests/inv/epoch2x.rs index 44a4bf3967..b2136bd1f0 100644 --- a/stackslib/src/net/tests/inv/epoch2x.rs +++ b/stackslib/src/net/tests/inv/epoch2x.rs @@ -1390,22 +1390,16 @@ fn test_sync_inv_2_peers_plain() { }; // nothing should break - match peer_1.network.inv_state { - Some(ref inv) => { - assert_eq!(inv.get_broken_peers().len(), 0); - assert_eq!(inv.get_dead_peers().len(), 0); - assert_eq!(inv.get_diverged_peers().len(), 0); - } - None => {} + if let Some(ref inv) = peer_1.network.inv_state { + assert_eq!(inv.get_broken_peers().len(), 0); + assert_eq!(inv.get_dead_peers().len(), 0); + assert_eq!(inv.get_diverged_peers().len(), 0); } - match peer_2.network.inv_state { - Some(ref inv) => { - assert_eq!(inv.get_broken_peers().len(), 0); - assert_eq!(inv.get_dead_peers().len(), 0); - assert_eq!(inv.get_diverged_peers().len(), 0); - } - None => {} + if let Some(ref inv) = peer_2.network.inv_state { + assert_eq!(inv.get_broken_peers().len(), 0); + assert_eq!(inv.get_dead_peers().len(), 0); + assert_eq!(inv.get_diverged_peers().len(), 0); } round += 1; @@ -1553,46 +1547,38 @@ fn test_sync_inv_2_peers_stale() { None => 0, }; - match peer_1.network.inv_state { - Some(ref inv) => { - info!("Peer 1 stats: {:?}", &inv.block_stats); - assert_eq!(inv.get_broken_peers().len(), 0); - assert_eq!(inv.get_dead_peers().len(), 0); - assert_eq!(inv.get_diverged_peers().len(), 0); - - if let Some(ref peer_2_inv) = inv.block_stats.get(&peer_2.to_neighbor().addr) { - if peer_2_inv.inv.num_sortitions - == first_stacks_block_height - - peer_1.config.burnchain.first_block_height - { - for i in 0..first_stacks_block_height { - assert!(!peer_2_inv.inv.has_ith_block(i)); - assert!(!peer_2_inv.inv.has_ith_microblock_stream(i)); - } - peer_2_check = true; + if let Some(ref inv) = peer_1.network.inv_state { + info!("Peer 1 stats: {:?}", &inv.block_stats); + assert_eq!(inv.get_broken_peers().len(), 0); + assert_eq!(inv.get_dead_peers().len(), 0); + assert_eq!(inv.get_diverged_peers().len(), 0); + + if let Some(ref peer_2_inv) = inv.block_stats.get(&peer_2.to_neighbor().addr) { + if 
peer_2_inv.inv.num_sortitions + == first_stacks_block_height - peer_1.config.burnchain.first_block_height + { + for i in 0..first_stacks_block_height { + assert!(!peer_2_inv.inv.has_ith_block(i)); + assert!(!peer_2_inv.inv.has_ith_microblock_stream(i)); } + peer_2_check = true; } } - None => {} } - match peer_2.network.inv_state { - Some(ref inv) => { - info!("Peer 2 stats: {:?}", &inv.block_stats); - assert_eq!(inv.get_broken_peers().len(), 0); - assert_eq!(inv.get_dead_peers().len(), 0); - assert_eq!(inv.get_diverged_peers().len(), 0); - - if let Some(ref peer_1_inv) = inv.block_stats.get(&peer_1.to_neighbor().addr) { - if peer_1_inv.inv.num_sortitions - == first_stacks_block_height - - peer_1.config.burnchain.first_block_height - { - peer_1_check = true; - } + if let Some(ref inv) = peer_2.network.inv_state { + info!("Peer 2 stats: {:?}", &inv.block_stats); + assert_eq!(inv.get_broken_peers().len(), 0); + assert_eq!(inv.get_dead_peers().len(), 0); + assert_eq!(inv.get_diverged_peers().len(), 0); + + if let Some(ref peer_1_inv) = inv.block_stats.get(&peer_1.to_neighbor().addr) { + if peer_1_inv.inv.num_sortitions + == first_stacks_block_height - peer_1.config.burnchain.first_block_height + { + peer_1_check = true; } } - None => {} } round += 1; @@ -1703,54 +1689,48 @@ fn test_sync_inv_2_peers_unstable() { None => 0, }; - match peer_1.network.inv_state { - Some(ref inv) => { - info!("Peer 1 stats: {:?}", &inv.block_stats); - assert_eq!(inv.get_broken_peers().len(), 0); - assert_eq!(inv.get_dead_peers().len(), 0); - assert_eq!(inv.get_diverged_peers().len(), 0); + if let Some(ref inv) = peer_1.network.inv_state { + info!("Peer 1 stats: {:?}", &inv.block_stats); + assert_eq!(inv.get_broken_peers().len(), 0); + assert_eq!(inv.get_dead_peers().len(), 0); + assert_eq!(inv.get_diverged_peers().len(), 0); - if let Some(stats) = inv.get_stats(&peer_2.to_neighbor().addr) { - if stats.target_pox_reward_cycle > 0 { - peer_1_pox_cycle_start = true; - } - if stats.target_block_reward_cycle > 0 { - peer_1_block_cycle_start = true; - } - if stats.target_pox_reward_cycle == 0 && peer_1_pox_cycle_start { - peer_1_pox_cycle = true; - } - if stats.target_block_reward_cycle == 0 && peer_1_block_cycle_start { - peer_1_block_cycle = true; - } + if let Some(stats) = inv.get_stats(&peer_2.to_neighbor().addr) { + if stats.target_pox_reward_cycle > 0 { + peer_1_pox_cycle_start = true; + } + if stats.target_block_reward_cycle > 0 { + peer_1_block_cycle_start = true; + } + if stats.target_pox_reward_cycle == 0 && peer_1_pox_cycle_start { + peer_1_pox_cycle = true; + } + if stats.target_block_reward_cycle == 0 && peer_1_block_cycle_start { + peer_1_block_cycle = true; } } - None => {} } - match peer_2.network.inv_state { - Some(ref inv) => { - info!("Peer 2 stats: {:?}", &inv.block_stats); - assert_eq!(inv.get_broken_peers().len(), 0); - assert_eq!(inv.get_dead_peers().len(), 0); - assert_eq!(inv.get_diverged_peers().len(), 0); + if let Some(ref inv) = peer_2.network.inv_state { + info!("Peer 2 stats: {:?}", &inv.block_stats); + assert_eq!(inv.get_broken_peers().len(), 0); + assert_eq!(inv.get_dead_peers().len(), 0); + assert_eq!(inv.get_diverged_peers().len(), 0); - if let Some(stats) = inv.get_stats(&peer_1.to_neighbor().addr) { - if stats.target_pox_reward_cycle > 0 { - peer_2_pox_cycle_start = true; - } - if stats.target_block_reward_cycle > 0 { - peer_2_block_cycle_start = true; - } - if stats.target_pox_reward_cycle == 0 && peer_2_pox_cycle_start { - peer_2_pox_cycle = true; - } - if 
stats.target_block_reward_cycle == 0 && peer_2_block_cycle_start { - peer_2_block_cycle = true; - } + if let Some(stats) = inv.get_stats(&peer_1.to_neighbor().addr) { + if stats.target_pox_reward_cycle > 0 { + peer_2_pox_cycle_start = true; + } + if stats.target_block_reward_cycle > 0 { + peer_2_block_cycle_start = true; + } + if stats.target_pox_reward_cycle == 0 && peer_2_pox_cycle_start { + peer_2_pox_cycle = true; + } + if stats.target_block_reward_cycle == 0 && peer_2_block_cycle_start { + peer_2_block_cycle = true; } } - None => {} } round += 1; @@ -1917,42 +1897,30 @@ fn test_sync_inv_2_peers_different_pox_vectors() { let _ = peer_2.step(); // peer 1 should see that peer 2 has all blocks for reward cycles 5 through 9 - match peer_1.network.inv_state { - Some(ref inv) => { - inv_1_count = inv.get_inv_num_blocks(&peer_2.to_neighbor().addr); - peer_1_sorts = inv.get_inv_sortitions(&peer_2.to_neighbor().addr); - } - None => {} + if let Some(ref inv) = peer_1.network.inv_state { + inv_1_count = inv.get_inv_num_blocks(&peer_2.to_neighbor().addr); + peer_1_sorts = inv.get_inv_sortitions(&peer_2.to_neighbor().addr); }; // peer 2 should see that peer 1 has all blocks up to where we stopped feeding them to // it - match peer_2.network.inv_state { - Some(ref inv) => { - inv_2_count = inv.get_inv_num_blocks(&peer_1.to_neighbor().addr); - peer_2_sorts = inv.get_inv_sortitions(&peer_1.to_neighbor().addr); - } - None => {} + if let Some(ref inv) = peer_2.network.inv_state { + inv_2_count = inv.get_inv_num_blocks(&peer_1.to_neighbor().addr); + peer_2_sorts = inv.get_inv_sortitions(&peer_1.to_neighbor().addr); }; - match peer_1.network.inv_state { - Some(ref inv) => { - info!("Peer 1 stats: {:?}", &inv.block_stats); - assert_eq!(inv.get_broken_peers().len(), 0); - assert_eq!(inv.get_dead_peers().len(), 0); - assert_eq!(inv.get_diverged_peers().len(), 0); - } - None => {} + if let Some(ref inv) = peer_1.network.inv_state { + info!("Peer 1 stats: {:?}", &inv.block_stats); + assert_eq!(inv.get_broken_peers().len(), 0); + assert_eq!(inv.get_dead_peers().len(), 0); + assert_eq!(inv.get_diverged_peers().len(), 0); } - match peer_2.network.inv_state { - Some(ref inv) => { - info!("Peer 2 stats: {:?}", &inv.block_stats); - assert_eq!(inv.get_broken_peers().len(), 0); - assert_eq!(inv.get_dead_peers().len(), 0); - assert_eq!(inv.get_diverged_peers().len(), 0); - } - None => {} + if let Some(ref inv) = peer_2.network.inv_state { + info!("Peer 2 stats: {:?}", &inv.block_stats); + assert_eq!(inv.get_broken_peers().len(), 0); + assert_eq!(inv.get_dead_peers().len(), 0); + assert_eq!(inv.get_diverged_peers().len(), 0); } round += 1; diff --git a/stackslib/src/net/tests/inv/nakamoto.rs b/stackslib/src/net/tests/inv/nakamoto.rs index c313ede598..94f1eb8124 100644 --- a/stackslib/src/net/tests/inv/nakamoto.rs +++ b/stackslib/src/net/tests/inv/nakamoto.rs @@ -1089,22 +1089,16 @@ fn test_nakamoto_inv_sync_across_epoch_change() { .unwrap_or(0); // nothing should break - match peer.network.inv_state { - Some(ref inv) => { - assert_eq!(inv.get_broken_peers().len(), 0); - assert_eq!(inv.get_dead_peers().len(), 0); - assert_eq!(inv.get_diverged_peers().len(), 0); - } - None => {} + if let Some(ref inv) = peer.network.inv_state { + assert_eq!(inv.get_broken_peers().len(), 0); + assert_eq!(inv.get_dead_peers().len(), 0); + assert_eq!(inv.get_diverged_peers().len(), 0); } - match other_peer.network.inv_state { - Some(ref inv) => { - assert_eq!(inv.get_broken_peers().len(), 0); - assert_eq!(inv.get_dead_peers().len(), 0); - 
assert_eq!(inv.get_diverged_peers().len(), 0); - } - None => {} + if let Some(ref inv) = other_peer.network.inv_state { + assert_eq!(inv.get_broken_peers().len(), 0); + assert_eq!(inv.get_dead_peers().len(), 0); + assert_eq!(inv.get_diverged_peers().len(), 0); } round += 1; diff --git a/stackslib/src/net/tests/neighbors.rs b/stackslib/src/net/tests/neighbors.rs index f1e3fa76cb..8c56b48b0d 100644 --- a/stackslib/src/net/tests/neighbors.rs +++ b/stackslib/src/net/tests/neighbors.rs @@ -68,20 +68,14 @@ fn test_step_walk_1_neighbor_plain() { walk_2_count ); - match peer_1.network.walk { - Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); - } - None => {} + if let Some(ref w) = peer_1.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + assert_eq!(w.result.replaced_neighbors.len(), 0); }; - match peer_2.network.walk { - Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); - } - None => {} + if let Some(ref w) = peer_2.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + assert_eq!(w.result.replaced_neighbors.len(), 0); }; i += 1; @@ -184,22 +178,16 @@ fn test_step_walk_1_neighbor_plain_no_natpunch() { walk_2_count ); - match peer_1.network.walk { - Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.dead_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); - } - None => {} + if let Some(ref w) = peer_1.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + assert_eq!(w.result.dead_connections.len(), 0); + assert_eq!(w.result.replaced_neighbors.len(), 0); }; - match peer_2.network.walk { - Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.dead_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); - } - None => {} + if let Some(ref w) = peer_2.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + assert_eq!(w.result.dead_connections.len(), 0); + assert_eq!(w.result.replaced_neighbors.len(), 0); }; if let Some(s) = peer_1 @@ -306,20 +294,14 @@ fn test_step_walk_1_neighbor_denied() { walk_1_retries = peer_1.network.walk_retries; walk_2_retries = peer_2.network.walk_retries; - match peer_1.network.walk { - Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); - } - None => {} + if let Some(ref w) = peer_1.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + assert_eq!(w.result.replaced_neighbors.len(), 0); }; - match peer_2.network.walk { - Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); - } - None => {} + if let Some(ref w) = peer_2.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + assert_eq!(w.result.replaced_neighbors.len(), 0); }; i += 1; @@ -400,20 +382,14 @@ fn test_step_walk_1_neighbor_bad_epoch() { walk_1_retries = peer_1.network.walk_attempts; walk_2_retries = peer_2.network.walk_attempts; - match peer_1.network.walk { - Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); - } - None => {} + if let Some(ref w) = peer_1.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + assert_eq!(w.result.replaced_neighbors.len(), 0); }; - match peer_2.network.walk { - Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 
0); - assert_eq!(w.result.replaced_neighbors.len(), 0); - } - None => {} + if let Some(ref w) = peer_2.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + assert_eq!(w.result.replaced_neighbors.len(), 0); }; i += 1; @@ -463,20 +439,14 @@ fn test_step_walk_1_neighbor_heartbeat_ping() { walk_2_count ); - match peer_1.network.walk { - Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); - } - None => {} + if let Some(ref w) = peer_1.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + assert_eq!(w.result.replaced_neighbors.len(), 0); }; - match peer_2.network.walk { - Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); - } - None => {} + if let Some(ref w) = peer_2.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + assert_eq!(w.result.replaced_neighbors.len(), 0); }; i += 1; @@ -573,23 +543,17 @@ fn test_step_walk_1_neighbor_bootstrapping() { walk_2_count ); - match peer_1.network.walk { - Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); + if let Some(ref w) = peer_1.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + assert_eq!(w.result.replaced_neighbors.len(), 0); - // peer 2 never gets added to peer 1's frontier - assert!(!w.frontier.contains_key(&neighbor_2.addr)); - } - None => {} + // peer 2 never gets added to peer 1's frontier + assert!(!w.frontier.contains_key(&neighbor_2.addr)); }; - match peer_2.network.walk { - Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); - } - None => {} + if let Some(ref w) = peer_2.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + assert_eq!(w.result.replaced_neighbors.len(), 0); }; i += 1; @@ -657,23 +621,17 @@ fn test_step_walk_1_neighbor_behind() { walk_2_count ); - match peer_1.network.walk { - Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); - } - None => {} + if let Some(ref w) = peer_1.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + assert_eq!(w.result.replaced_neighbors.len(), 0); }; - match peer_2.network.walk { - Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); + if let Some(ref w) = peer_2.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + assert_eq!(w.result.replaced_neighbors.len(), 0); - // peer 1 never gets added to peer 2's frontier - assert!(!w.frontier.contains_key(&neighbor_1.addr)); - } - None => {} + // peer 1 never gets added to peer 2's frontier + assert!(!w.frontier.contains_key(&neighbor_1.addr)); }; i += 1; @@ -789,20 +747,14 @@ fn test_step_walk_10_neighbors_of_neighbor_plain() { walk_2_count ); - match peer_1.network.walk { - Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); - } - None => {} + if let Some(ref w) = peer_1.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + assert_eq!(w.result.replaced_neighbors.len(), 0); }; - match peer_2.network.walk { - Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); - } - None => {} + if let Some(ref w) = peer_2.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + 
assert_eq!(w.result.replaced_neighbors.len(), 0); }; i += 1; @@ -942,20 +894,14 @@ fn test_step_walk_10_neighbors_of_neighbor_bootstrapping() { walk_2_count ); - match peer_1.network.walk { - Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); - } - None => {} + if let Some(ref w) = peer_1.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + assert_eq!(w.result.replaced_neighbors.len(), 0); }; - match peer_2.network.walk { - Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); - } - None => {} + if let Some(ref w) = peer_2.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + assert_eq!(w.result.replaced_neighbors.len(), 0); }; steps += 1; @@ -1091,20 +1037,14 @@ fn test_step_walk_2_neighbors_plain() { walk_2_count ); - match peer_1.network.walk { - Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); - } - None => {} + if let Some(ref w) = peer_1.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + assert_eq!(w.result.replaced_neighbors.len(), 0); }; - match peer_2.network.walk { - Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); - } - None => {} + if let Some(ref w) = peer_2.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + assert_eq!(w.result.replaced_neighbors.len(), 0); }; i += 1; @@ -1371,28 +1311,19 @@ fn test_step_walk_3_neighbors_inbound() { ); test_debug!("========"); - match peer_1.network.walk { - Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); - } - None => {} + if let Some(ref w) = peer_1.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + assert_eq!(w.result.replaced_neighbors.len(), 0); }; - match peer_2.network.walk { - Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); - } - None => {} + if let Some(ref w) = peer_2.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + assert_eq!(w.result.replaced_neighbors.len(), 0); }; - match peer_3.network.walk { - Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); - } - None => {} + if let Some(ref w) = peer_3.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + assert_eq!(w.result.replaced_neighbors.len(), 0); }; for (i, peer) in [&peer_1, &peer_2, &peer_3].iter().enumerate() { @@ -1542,20 +1473,14 @@ fn test_step_walk_2_neighbors_rekey() { let _ = peer_1.step(); let _ = peer_2.step(); - match peer_1.network.walk { - Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); - } - None => {} + if let Some(ref w) = peer_1.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + assert_eq!(w.result.replaced_neighbors.len(), 0); }; - match peer_2.network.walk { - Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); - } - None => {} + if let Some(ref w) = peer_2.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + assert_eq!(w.result.replaced_neighbors.len(), 0); }; } @@ -1649,20 +1574,14 @@ fn test_step_walk_2_neighbors_different_networks() { walk_2_count ); - match peer_1.network.walk { - 
Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); - } - None => {} + if let Some(ref w) = peer_1.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + assert_eq!(w.result.replaced_neighbors.len(), 0); }; - match peer_2.network.walk { - Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); - } - None => {} + if let Some(ref w) = peer_2.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + assert_eq!(w.result.replaced_neighbors.len(), 0); }; i += 1; diff --git a/stackslib/src/net/tests/relay/epoch2x.rs b/stackslib/src/net/tests/relay/epoch2x.rs index ddf4e92598..8fdbfb846d 100644 --- a/stackslib/src/net/tests/relay/epoch2x.rs +++ b/stackslib/src/net/tests/relay/epoch2x.rs @@ -1702,23 +1702,17 @@ fn test_get_blocks_and_microblocks_2_peers_push_transactions() { let mut peer_0_to_1 = false; let mut peer_1_to_0 = false; for (nk, event_id) in peers[0].network.events.iter() { - match peers[0].network.peers.get(event_id) { - Some(convo) => { - if *nk == peer_1_nk { - peer_0_to_1 = true; - } + if let Some(convo) = peers[0].network.peers.get(event_id) { + if *nk == peer_1_nk { + peer_0_to_1 = true; } - None => {} } } for (nk, event_id) in peers[1].network.events.iter() { - match peers[1].network.peers.get(event_id) { - Some(convo) => { - if *nk == peer_0_nk { - peer_1_to_0 = true; - } + if let Some(convo) = peers[1].network.peers.get(event_id) { + if *nk == peer_0_nk { + peer_1_to_0 = true; } - None => {} } } @@ -3732,17 +3726,14 @@ fn test_block_versioned_smart_contract_mempool_rejection_until_v210() { // tenure 28 let versioned_contract = (*versioned_contract_opt.borrow()).clone().unwrap(); let versioned_contract_len = versioned_contract.serialize_to_vec().len(); - match node.chainstate.will_admit_mempool_tx( + if let Err(e) = node.chainstate.will_admit_mempool_tx( &sortdb.index_handle(&tip.sortition_id), &consensus_hash, &stacks_block.block_hash(), &versioned_contract, versioned_contract_len as u64, ) { - Err(e) => { - panic!("will_admit_mempool_tx {:?}", &e); - } - Ok(_) => {} + panic!("will_admit_mempool_tx {:?}", &e); }; peer.sortdb = Some(sortdb); diff --git a/stackslib/src/net/unsolicited.rs b/stackslib/src/net/unsolicited.rs index e7f1c256a4..922332bedd 100644 --- a/stackslib/src/net/unsolicited.rs +++ b/stackslib/src/net/unsolicited.rs @@ -481,21 +481,18 @@ impl PeerNetwork { if need_block { // have the downloader request this block if it's new and we don't have it - match self.block_downloader { - Some(ref mut downloader) => { - downloader.hint_block_sortition_height_available( - block_sortition_height, - ibd, - need_block, - ); + if let Some(ref mut downloader) = self.block_downloader { + downloader.hint_block_sortition_height_available( + block_sortition_height, + ibd, + need_block, + ); - // advance straight to download state if we're in inv state - if self.work_state == PeerNetworkWorkState::BlockInvSync { - debug!("{:?}: advance directly to block download with knowledge of block sortition {}", &self.get_local_peer(), block_sortition_height); - } - self.have_data_to_download = true; + // advance straight to download state if we're in inv state + if self.work_state == PeerNetworkWorkState::BlockInvSync { + debug!("{:?}: advance directly to block download with knowledge of block sortition {}", &self.get_local_peer(), block_sortition_height); } - None => {} + self.have_data_to_download = true; } } } diff --git 
a/stackslib/src/util_lib/mod.rs b/stackslib/src/util_lib/mod.rs index 87031676db..af9a4d98a7 100644 --- a/stackslib/src/util_lib/mod.rs +++ b/stackslib/src/util_lib/mod.rs @@ -32,13 +32,10 @@ pub mod test { let mut done = false; while get_epoch_time_secs() <= deadline { sleep_ms(1000); - match rx.try_recv() { - Ok(success) => { - assert!(success); - done = true; - break; - } - Err(_) => {} + if let Ok(success) = rx.try_recv() { + assert!(success); + done = true; + break; } } From 34e34ef5f5ec489d5a3040bc866c5b49b9c3fc10 Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Thu, 23 Jan 2025 13:56:01 -0500 Subject: [PATCH 12/59] chore: Apply Clippy lint `redundant_pattern_matching` again --- stackslib/src/chainstate/stacks/db/mod.rs | 6 +++--- stackslib/src/chainstate/stacks/index/test/marf.rs | 4 ++-- stackslib/src/chainstate/stacks/tests/mod.rs | 4 ++-- stackslib/src/net/chat.rs | 2 +- stackslib/src/net/mod.rs | 2 +- stackslib/src/net/server.rs | 2 +- 6 files changed, 10 insertions(+), 10 deletions(-) diff --git a/stackslib/src/chainstate/stacks/db/mod.rs b/stackslib/src/chainstate/stacks/db/mod.rs index 1d7c97b676..5821f47394 100644 --- a/stackslib/src/chainstate/stacks/db/mod.rs +++ b/stackslib/src/chainstate/stacks/db/mod.rs @@ -2746,7 +2746,7 @@ pub mod test { balances: Vec<(StacksAddress, u64)>, ) -> StacksChainState { let path = chainstate_path(test_name); - if let Ok(_) = fs::metadata(&path) { + if fs::metadata(&path).is_ok() { fs::remove_dir_all(&path).unwrap(); }; @@ -2863,7 +2863,7 @@ pub mod test { }; let path = chainstate_path(function_name!()); - if let Ok(_) = fs::metadata(&path) { + if fs::metadata(&path).is_ok() { fs::remove_dir_all(&path).unwrap(); }; @@ -2950,7 +2950,7 @@ pub mod test { }; let path = chainstate_path(function_name!()); - if let Ok(_) = fs::metadata(&path) { + if fs::metadata(&path).is_ok() { fs::remove_dir_all(&path).unwrap(); }; diff --git a/stackslib/src/chainstate/stacks/index/test/marf.rs b/stackslib/src/chainstate/stacks/index/test/marf.rs index 7102527ba8..a721b2dce4 100644 --- a/stackslib/src/chainstate/stacks/index/test/marf.rs +++ b/stackslib/src/chainstate/stacks/index/test/marf.rs @@ -1282,7 +1282,7 @@ fn marf_insert_random_10485760_4096_file_storage() { } let path = "/tmp/rust_marf_insert_random_10485760_4096_file_storage".to_string(); - if let Ok(_) = fs::metadata(&path) { + if fs::metadata(&path).is_ok() { fs::remove_dir_all(&path).unwrap(); }; let marf_opts = MARFOpenOpts::default(); @@ -1564,7 +1564,7 @@ fn marf_read_random_1048576_4096_file_storage() { for marf_opts in MARFOpenOpts::all().into_iter() { test_debug!("With {:?}", &marf_opts); let path = "/tmp/rust_marf_insert_random_1048576_4096_file_storage".to_string(); - if let Err(_) = fs::metadata(&path) { + if fs::metadata(&path).is_err() { eprintln!("Run the marf_insert_random_1048576_4096_file_storage test first"); return; }; diff --git a/stackslib/src/chainstate/stacks/tests/mod.rs b/stackslib/src/chainstate/stacks/tests/mod.rs index d119dacd8e..29207dce6a 100644 --- a/stackslib/src/chainstate/stacks/tests/mod.rs +++ b/stackslib/src/chainstate/stacks/tests/mod.rs @@ -338,7 +338,7 @@ impl TestStacksNode { panic!("Tried to fork an unforkable chainstate instance"); } - if let Ok(_) = fs::metadata(&chainstate_path(new_test_name)) { + if fs::metadata(&chainstate_path(new_test_name)).is_ok() { fs::remove_dir_all(&chainstate_path(new_test_name)).unwrap(); } @@ -1418,7 +1418,7 @@ pub fn instantiate_and_exec( post_flight_callback: Option>, ) -> StacksChainState { let path = 
chainstate_path(test_name); - if let Ok(_) = fs::metadata(&path) { + if fs::metadata(&path).is_ok() { fs::remove_dir_all(&path).unwrap(); }; diff --git a/stackslib/src/net/chat.rs b/stackslib/src/net/chat.rs index 0ce27038cd..8fcc7cdf41 100644 --- a/stackslib/src/net/chat.rs +++ b/stackslib/src/net/chat.rs @@ -3106,7 +3106,7 @@ mod test { services: u16, ) -> (PeerDB, SortitionDB, StackerDBs, PoxId, StacksChainState) { let test_path = format!("/tmp/stacks-test-databases-{}", testname); - if let Ok(_) = fs::metadata(&test_path) { + if fs::metadata(&test_path).is_ok() { fs::remove_dir_all(&test_path).unwrap(); }; diff --git a/stackslib/src/net/mod.rs b/stackslib/src/net/mod.rs index a2461631a6..0959d2ff35 100644 --- a/stackslib/src/net/mod.rs +++ b/stackslib/src/net/mod.rs @@ -2793,7 +2793,7 @@ pub mod test { pub fn make_test_path(config: &TestPeerConfig) -> String { let test_path = TestPeer::test_path(&config); - if let Ok(_) = fs::metadata(&test_path) { + if fs::metadata(&test_path).is_ok() { fs::remove_dir_all(&test_path).unwrap(); }; diff --git a/stackslib/src/net/server.rs b/stackslib/src/net/server.rs index 2459f64c00..05d831ca7a 100644 --- a/stackslib/src/net/server.rs +++ b/stackslib/src/net/server.rs @@ -726,7 +726,7 @@ mod test { peer.step().unwrap(); // asked to yield? - if let Ok(_) = http_rx.try_recv() { + if http_rx.try_recv().is_ok() { break; } } From 9971d1a40d5f1accb013565e545633ad5489f589 Mon Sep 17 00:00:00 2001 From: Roberto De Ioris Date: Fri, 24 Jan 2025 08:17:33 +0100 Subject: [PATCH 13/59] added warn! when timeout --- .../src/nakamoto_node/signer_coordinator.rs | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs index 818f1f6a08..ce136a05a2 100644 --- a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs @@ -359,6 +359,12 @@ impl SignerCoordinator { } if rejections_timer.elapsed() > rejections_timeout { + warn!("Timed out while waiting for responses from signers"; + "elapsed" => rejections_timer.elapsed().as_secs(), + "rejections_timeout" => rejections_timeout.as_secs(), + "rejections" => rejections, + "rejections_threshold" => self.total_weight.saturating_sub(self.weight_threshold) + ); return Err(NakamotoNodeError::SigningCoordinatorFailure( "Gave up while tried reaching the threshold".into(), )); @@ -399,6 +405,12 @@ impl SignerCoordinator { ); return Ok(block_status.gathered_signatures.values().cloned().collect()); } else if rejections_timer.elapsed() > rejections_timeout { + warn!("Timed out while waiting for responses from signers"; + "elapsed" => rejections_timer.elapsed().as_secs(), + "rejections_timeout" => rejections_timeout.as_secs(), + "rejections" => rejections, + "rejections_threshold" => self.total_weight.saturating_sub(self.weight_threshold) + ); return Err(NakamotoNodeError::SigningCoordinatorFailure( "Gave up while tried reaching the threshold".into(), )); From 7e2d60e3f9e6759918e55cb490e60eb9b2e732e8 Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Fri, 24 Jan 2025 12:09:50 -0500 Subject: [PATCH 14/59] chore: Address PR comments from Aaron --- stackslib/src/burnchains/bitcoin/indexer.rs | 21 +++++++++------------ stackslib/src/burnchains/tests/mod.rs | 4 ++-- 2 files changed, 11 insertions(+), 14 deletions(-) diff --git a/stackslib/src/burnchains/bitcoin/indexer.rs b/stackslib/src/burnchains/bitcoin/indexer.rs index 2a19074aef..899c96390c 100644 
--- a/stackslib/src/burnchains/bitcoin/indexer.rs +++ b/stackslib/src/burnchains/bitcoin/indexer.rs @@ -264,34 +264,31 @@ impl BitcoinIndexer { match net::TcpStream::connect((self.config.peer_host.as_str(), self.config.peer_port)) { Ok(s) => { // Disable Nagle algorithm - s.set_nodelay(true).map_err(|_e| { - test_debug!("Failed to set TCP_NODELAY: {:?}", &_e); + s.set_nodelay(true).map_err(|e| { + test_debug!("Failed to set TCP_NODELAY: {e:?}"); btc_error::ConnectionError })?; // set timeout s.set_read_timeout(Some(Duration::from_secs(self.runtime.timeout))) - .map_err(|_e| { - test_debug!("Failed to set TCP read timeout: {:?}", &_e); + .map_err(|e| { + test_debug!("Failed to set TCP read timeout: {e:?}"); btc_error::ConnectionError })?; s.set_write_timeout(Some(Duration::from_secs(self.runtime.timeout))) - .map_err(|_e| { - test_debug!("Failed to set TCP write timeout: {:?}", &_e); + .map_err(|e| { + test_debug!("Failed to set TCP write timeout: {e:?}"); btc_error::ConnectionError })?; - if let Some(s) = self.runtime.sock.take() { - let _ = s.shutdown(Shutdown::Both); + if let Some(s_old) = self.runtime.sock.replace(s) { + let _ = s_old.shutdown(Shutdown::Both); } - - self.runtime.sock = Some(s); Ok(()) } Err(_e) => { - let s = self.runtime.sock.take(); - if let Some(s) = s { + if let Some(s) = self.runtime.sock.take() { let _ = s.shutdown(Shutdown::Both); } Err(btc_error::ConnectionError) diff --git a/stackslib/src/burnchains/tests/mod.rs b/stackslib/src/burnchains/tests/mod.rs index 2f6d3112b7..c716f9f4e3 100644 --- a/stackslib/src/burnchains/tests/mod.rs +++ b/stackslib/src/burnchains/tests/mod.rs @@ -579,8 +579,8 @@ impl TestBurnchainBlock { pub fn patch_from_chain_tip(&mut self, parent_snapshot: &BlockSnapshot) { assert_eq!(parent_snapshot.block_height + 1, self.block_height); - for i in 0..self.txs.len() { - if let BlockstackOperationType::LeaderKeyRegister(ref mut data) = self.txs[i] { + for tx in self.txs.iter_mut() { + if let BlockstackOperationType::LeaderKeyRegister(ref mut data) = tx { assert_eq!(data.block_height, self.block_height); data.consensus_hash = parent_snapshot.consensus_hash.clone(); } From 82313d350276c5bc12e54c049a5421c70d8bc82e Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Fri, 24 Jan 2025 15:17:55 -0500 Subject: [PATCH 15/59] chore: Apply PR comment from Aaron --- stackslib/src/chainstate/stacks/tests/mod.rs | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/stackslib/src/chainstate/stacks/tests/mod.rs b/stackslib/src/chainstate/stacks/tests/mod.rs index 77ebc89ff5..80b1d17a62 100644 --- a/stackslib/src/chainstate/stacks/tests/mod.rs +++ b/stackslib/src/chainstate/stacks/tests/mod.rs @@ -521,18 +521,14 @@ impl TestStacksNode { fork_tip: &BlockSnapshot, miner: &TestMiner, ) -> Option { - for commit_op in miner.block_commits.iter().rev() { - if let Some(sn) = SortitionDB::get_block_snapshot_for_winning_stacks_block( + miner.block_commits.iter().rev().find_map(|commit_op| { + SortitionDB::get_block_snapshot_for_winning_stacks_block( ic, &fork_tip.sortition_id, &commit_op.block_header_hash, ) .unwrap() - { - return Some(sn); - } - } - return None; + }) } pub fn get_miner_balance(clarity_tx: &mut ClarityTx, addr: &StacksAddress) -> u128 { From c9cbd23dea29d065d1a05cc542a90f093af851bb Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Fri, 24 Jan 2025 20:45:44 -0500 Subject: [PATCH 16/59] fix: Undo `_e` => `e` variable rename --- stackslib/src/burnchains/bitcoin/indexer.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) 
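[Editorial aside on the PATCH 15 rewrite above; not part of the patch series.] PATCH 15 replaces an explicit for-loop that early-returns the first winning snapshot with `iter().rev().find_map(...)`. The equivalence is easier to see outside the collapsed diff; the sketch below uses placeholder generic types and a hypothetical `lookup` closure instead of the repository's real `SortitionDB` call, and only illustrates the iterator semantics.

// Illustrative sketch: `find_map` returns the first `Some` produced by the
// closure, which is exactly what the original loop with an early return did.
fn first_winning_loop<T, U>(items: &[T], lookup: &dyn Fn(&T) -> Option<U>) -> Option<U> {
    for item in items.iter().rev() {
        if let Some(found) = lookup(item) {
            return Some(found);
        }
    }
    None
}

fn first_winning_find_map<T, U>(items: &[T], lookup: &dyn Fn(&T) -> Option<U>) -> Option<U> {
    items.iter().rev().find_map(|item| lookup(item))
}

fn main() {
    // Hypothetical stand-in: only even "commits" have a winning snapshot.
    let lookup = |c: &u32| if c % 2 == 0 { Some(c * 10) } else { None };
    let commits = [1u32, 2, 3, 4];
    assert_eq!(first_winning_loop(&commits, &lookup), Some(40));
    assert_eq!(first_winning_find_map(&commits, &lookup), Some(40));
}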
diff --git a/stackslib/src/burnchains/bitcoin/indexer.rs b/stackslib/src/burnchains/bitcoin/indexer.rs index 899c96390c..69ba63c240 100644 --- a/stackslib/src/burnchains/bitcoin/indexer.rs +++ b/stackslib/src/burnchains/bitcoin/indexer.rs @@ -264,21 +264,21 @@ impl BitcoinIndexer { match net::TcpStream::connect((self.config.peer_host.as_str(), self.config.peer_port)) { Ok(s) => { // Disable Nagle algorithm - s.set_nodelay(true).map_err(|e| { - test_debug!("Failed to set TCP_NODELAY: {e:?}"); + s.set_nodelay(true).map_err(|_e| { + test_debug!("Failed to set TCP_NODELAY: {_e:?}"); btc_error::ConnectionError })?; // set timeout s.set_read_timeout(Some(Duration::from_secs(self.runtime.timeout))) - .map_err(|e| { - test_debug!("Failed to set TCP read timeout: {e:?}"); + .map_err(|_e| { + test_debug!("Failed to set TCP read timeout: {_e:?}"); btc_error::ConnectionError })?; s.set_write_timeout(Some(Duration::from_secs(self.runtime.timeout))) - .map_err(|e| { - test_debug!("Failed to set TCP write timeout: {e:?}"); + .map_err(|_e| { + test_debug!("Failed to set TCP write timeout: {_e:?}"); btc_error::ConnectionError })?; From 45ae0153843a91788e7990e164762d22fa163e7e Mon Sep 17 00:00:00 2001 From: Roberto De Ioris Date: Tue, 28 Jan 2025 18:12:15 +0000 Subject: [PATCH 17/59] initial prototype for configurable block_rejection timeout steps --- stackslib/src/config/mod.rs | 27 ++++++++++ .../src/nakamoto_node/signer_coordinator.rs | 50 ++++++++++++------- testnet/stacks-node/src/tests/signer/v0.rs | 6 ++- 3 files changed, 64 insertions(+), 19 deletions(-) diff --git a/stackslib/src/config/mod.rs b/stackslib/src/config/mod.rs index 6d2d5e4389..b26676294f 100644 --- a/stackslib/src/config/mod.rs +++ b/stackslib/src/config/mod.rs @@ -36,6 +36,8 @@ use stacks_common::util::get_epoch_time_ms; use stacks_common::util::hash::hex_bytes; use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; +use std::collections::BTreeMap; + use crate::burnchains::affirmation::AffirmationMap; use crate::burnchains::bitcoin::BitcoinNetworkType; use crate::burnchains::{Burnchain, MagicBytes, PoxConstants, BLOCKSTACK_MAGIC_MAINNET}; @@ -2156,6 +2158,8 @@ pub struct MinerConfig { pub tenure_extend_poll_secs: Duration, /// Duration to wait before attempting to issue a tenure extend pub tenure_timeout: Duration, + /// + pub block_rejection_timeout_steps: BTreeMap, } impl Default for MinerConfig { @@ -2194,6 +2198,14 @@ impl Default for MinerConfig { ), tenure_extend_poll_secs: Duration::from_secs(DEFAULT_TENURE_EXTEND_POLL_SECS), tenure_timeout: Duration::from_secs(DEFAULT_TENURE_TIMEOUT_SECS), + block_rejection_timeout_steps: { + let mut timeouts_btree = BTreeMap::::new(); + timeouts_btree.insert(0, Duration::from_secs(600)); + timeouts_btree.insert(1, Duration::from_secs(300)); + timeouts_btree.insert(2, Duration::from_secs(150)); + timeouts_btree.insert(3, Duration::from_secs(0)); + timeouts_btree + }, } } } @@ -2590,6 +2602,7 @@ pub struct MinerConfigFile { pub tenure_cost_limit_per_block_percentage: Option, pub tenure_extend_poll_secs: Option, pub tenure_timeout_secs: Option, + pub block_rejection_timeout_steps: Option>, } impl MinerConfigFile { @@ -2732,6 +2745,20 @@ impl MinerConfigFile { tenure_cost_limit_per_block_percentage, tenure_extend_poll_secs: self.tenure_extend_poll_secs.map(Duration::from_secs).unwrap_or(miner_default_config.tenure_extend_poll_secs), tenure_timeout: self.tenure_timeout_secs.map(Duration::from_secs).unwrap_or(miner_default_config.tenure_timeout), + 
block_rejection_timeout_steps: { + if let Some(block_rejection_timeout_items) = self.block_rejection_timeout_steps { + let mut timeouts_btree = BTreeMap::::new(); + for (slice, millis) in block_rejection_timeout_items.iter() { + match slice.parse::() { + Ok(slice_slot) => timeouts_btree.insert(slice_slot, Duration::from_millis(*millis)), + Err(e) => panic!("block_rejection_timeout_items keys must be unsigned integers: {}", e) + }; + } + timeouts_btree + } else{ + miner_default_config.block_rejection_timeout_steps + } + } }) } } diff --git a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs index ce136a05a2..686d62a2d2 100644 --- a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs @@ -38,6 +38,8 @@ use stacks::util::secp256k1::MessageSignature; use stacks::util_lib::boot::boot_code_id; #[cfg(test)] use stacks_common::util::tests::TestFlag; +use std::collections::BTreeMap; +use std::ops::Bound::Included; use super::stackerdb_listener::StackerDBListenerComms; use super::Error as NakamotoNodeError; @@ -53,9 +55,6 @@ use crate::Config; pub static BLOCK_REJECTIONS_CURRENT_TIMEOUT: LazyLock> = LazyLock::new(TestFlag::default); -/// Base timeout for rejections heuristic -pub static BLOCK_REJECTIONS_TIMEOUT_BASE: u64 = 600; - /// The state of the signer database listener, used by the miner thread to /// interact with the signer listener. pub struct SignerCoordinator { @@ -81,6 +80,8 @@ pub struct SignerCoordinator { /// Rather, this burn block is used to determine whether or not a new /// burn block has arrived since this thread started. burn_tip_at_start: ConsensusHash, + /// + block_rejection_timeout_steps: BTreeMap, } impl SignerCoordinator { @@ -126,6 +127,7 @@ impl SignerCoordinator { keep_running, listener_thread: None, burn_tip_at_start: burn_tip_at_start.clone(), + block_rejection_timeout_steps: config.miner.block_rejection_timeout_steps.clone(), }; // Spawn the signer DB listener thread @@ -311,9 +313,17 @@ impl SignerCoordinator { // this is used to track the start of the waiting cycle let mut rejections_timer = Instant::now(); // the amount of current rejections to eventually modify the timeout - let mut rejections: u32 = 0; + let mut rejections: u64 = 0; // default timeout - let mut rejections_timeout = Duration::from_secs(BLOCK_REJECTIONS_TIMEOUT_BASE); + let mut rejections_timeout = self + .block_rejection_timeout_steps + .range((Included(0), Included(rejections))) + .last() + .ok_or_else(|| { + NakamotoNodeError::SigningCoordinatorFailure( + "Invalid rejection timeout step function definition".into(), + ) + })?; // this is used for comparing block_status to identify if it has been changed from the previous event let mut block_status_tracker = BlockStatus::default(); loop { @@ -324,7 +334,7 @@ impl SignerCoordinator { block_signer_sighash, &mut block_status_tracker, rejections_timer, - rejections_timeout, + *rejections_timeout.1, EVENT_RECEIVER_POLL, )? 
{ Some(status) => status, @@ -358,10 +368,10 @@ impl SignerCoordinator { return Err(NakamotoNodeError::BurnchainTipChanged); } - if rejections_timer.elapsed() > rejections_timeout { + if rejections_timer.elapsed() > *rejections_timeout.1 { warn!("Timed out while waiting for responses from signers"; "elapsed" => rejections_timer.elapsed().as_secs(), - "rejections_timeout" => rejections_timeout.as_secs(), + "rejections_timeout" => rejections_timeout.1.as_secs(), "rejections" => rejections, "rejections_threshold" => self.total_weight.saturating_sub(self.weight_threshold) ); @@ -374,17 +384,21 @@ impl SignerCoordinator { } }; - if rejections != block_status.total_reject_weight { + if rejections != block_status.total_reject_weight as u64 { rejections_timer = Instant::now(); - rejections = block_status.total_reject_weight; - rejections_timeout = Duration::from_secs_f32( - BLOCK_REJECTIONS_TIMEOUT_BASE as f32 - - (BLOCK_REJECTIONS_TIMEOUT_BASE as f32 - * ((rejections as f32 / self.weight_threshold as f32).powf(2.0))), - ); + rejections = block_status.total_reject_weight as u64; + rejections_timeout = self + .block_rejection_timeout_steps + .range((Included(0), Included((rejections as f64 / self.total_weight as f64) as u64))) + .last() + .ok_or_else(|| { + NakamotoNodeError::SigningCoordinatorFailure( + "Invalid rejection timeout step function definition".into(), + ) + })?; #[cfg(test)] - BLOCK_REJECTIONS_CURRENT_TIMEOUT.set(rejections_timeout); + BLOCK_REJECTIONS_CURRENT_TIMEOUT.set(*rejections_timeout.1); } if block_status @@ -404,10 +418,10 @@ impl SignerCoordinator { "block_signer_sighash" => %block_signer_sighash, ); return Ok(block_status.gathered_signatures.values().cloned().collect()); - } else if rejections_timer.elapsed() > rejections_timeout { + } else if rejections_timer.elapsed() > *rejections_timeout.1 { warn!("Timed out while waiting for responses from signers"; "elapsed" => rejections_timer.elapsed().as_secs(), - "rejections_timeout" => rejections_timeout.as_secs(), + "rejections_timeout" => rejections_timeout.1.as_secs(), "rejections" => rejections, "rejections_threshold" => self.total_weight.saturating_sub(self.weight_threshold) ); diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index ddde8bd5da..ba10f9e792 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -7826,7 +7826,11 @@ fn block_validation_check_rejection_timeout_heuristic() { |config| { config.block_proposal_validation_timeout = timeout; }, - |_| {}, + |config| { + config.miner.block_rejection_timeout_steps.clear(); + config.miner.block_rejection_timeout_steps.insert(0, Duration::from_secs(600)); + config.miner.block_rejection_timeout_steps.insert(3, Duration::from_secs(17)); + }, None, None, ); From 3071d3601f951bd9f4e0ea14944e38fd1e840ca3 Mon Sep 17 00:00:00 2001 From: Roberto De Ioris Date: Wed, 29 Jan 2025 15:37:42 +0000 Subject: [PATCH 18/59] improved config system --- .github/workflows/bitcoin-tests.yml | 1 + stackslib/src/config/mod.rs | 25 +++-- .../src/nakamoto_node/signer_coordinator.rs | 48 +++++--- testnet/stacks-node/src/tests/signer/v0.rs | 105 +++++++++++++++--- 4 files changed, 133 insertions(+), 46 deletions(-) diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index bb64a1a8b7..41fb60d2e8 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -133,6 +133,7 @@ jobs: - tests::signer::v0::block_commit_delay - 
tests::signer::v0::continue_after_fast_block_no_sortition - tests::signer::v0::block_validation_response_timeout + - tests::signer::v0::block_validation_check_rejection_timeout_heuristic - tests::signer::v0::block_validation_pending_table - tests::signer::v0::new_tenure_while_validating_previous_scenario - tests::signer::v0::tenure_extend_after_bad_commit diff --git a/stackslib/src/config/mod.rs b/stackslib/src/config/mod.rs index b26676294f..2982273825 100644 --- a/stackslib/src/config/mod.rs +++ b/stackslib/src/config/mod.rs @@ -2158,8 +2158,8 @@ pub struct MinerConfig { pub tenure_extend_poll_secs: Duration, /// Duration to wait before attempting to issue a tenure extend pub tenure_timeout: Duration, - /// - pub block_rejection_timeout_steps: BTreeMap, + /// Define the timeout to apply while waiting for signers responses, based on the amount of rejections + pub block_rejection_timeout_steps: HashMap, } impl Default for MinerConfig { @@ -2199,12 +2199,13 @@ impl Default for MinerConfig { tenure_extend_poll_secs: Duration::from_secs(DEFAULT_TENURE_EXTEND_POLL_SECS), tenure_timeout: Duration::from_secs(DEFAULT_TENURE_TIMEOUT_SECS), block_rejection_timeout_steps: { - let mut timeouts_btree = BTreeMap::::new(); - timeouts_btree.insert(0, Duration::from_secs(600)); - timeouts_btree.insert(1, Duration::from_secs(300)); - timeouts_btree.insert(2, Duration::from_secs(150)); - timeouts_btree.insert(3, Duration::from_secs(0)); - timeouts_btree + let mut rejections_timeouts_default_map = HashMap::::new(); + rejections_timeouts_default_map.insert(0, Duration::from_secs(600)); + rejections_timeouts_default_map.insert(10, Duration::from_secs(300)); + rejections_timeouts_default_map.insert(20, Duration::from_secs(150)); + rejections_timeouts_default_map.insert(30, Duration::from_secs(60)); + rejections_timeouts_default_map.insert(31, Duration::from_secs(0)); + rejections_timeouts_default_map }, } } @@ -2747,14 +2748,14 @@ impl MinerConfigFile { tenure_timeout: self.tenure_timeout_secs.map(Duration::from_secs).unwrap_or(miner_default_config.tenure_timeout), block_rejection_timeout_steps: { if let Some(block_rejection_timeout_items) = self.block_rejection_timeout_steps { - let mut timeouts_btree = BTreeMap::::new(); - for (slice, millis) in block_rejection_timeout_items.iter() { + let mut rejection_timeout_durations = HashMap::::new(); + for (slice, seconds) in block_rejection_timeout_items.iter() { match slice.parse::() { - Ok(slice_slot) => timeouts_btree.insert(slice_slot, Duration::from_millis(*millis)), + Ok(slice_slot) => rejection_timeout_durations.insert(slice_slot, Duration::from_secs(*seconds)), Err(e) => panic!("block_rejection_timeout_items keys must be unsigned integers: {}", e) }; } - timeouts_btree + rejection_timeout_durations } else{ miner_default_config.block_rejection_timeout_steps } diff --git a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs index 686d62a2d2..d04b306bfd 100644 --- a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs @@ -38,7 +38,7 @@ use stacks::util::secp256k1::MessageSignature; use stacks::util_lib::boot::boot_code_id; #[cfg(test)] use stacks_common::util::tests::TestFlag; -use std::collections::BTreeMap; +use std::collections::{BTreeMap, HashMap}; use std::ops::Bound::Included; use super::stackerdb_listener::StackerDBListenerComms; @@ -81,7 +81,7 @@ pub struct SignerCoordinator { /// burn block has arrived since this 
thread started. burn_tip_at_start: ConsensusHash, /// - block_rejection_timeout_steps: BTreeMap, + block_rejection_timeout_steps: HashMap, } impl SignerCoordinator { @@ -310,15 +310,20 @@ impl SignerCoordinator { sortdb: &SortitionDB, counters: &Counters, ) -> Result, NakamotoNodeError> { - // this is used to track the start of the waiting cycle - let mut rejections_timer = Instant::now(); - // the amount of current rejections to eventually modify the timeout + // build a BTreeMap of the various timeout steps + let mut block_rejection_timeout_steps = BTreeMap::::new(); + for (percentage, duration) in self.block_rejection_timeout_steps.iter() { + let rejections_amount = + ((self.total_weight as f64 / 100.0) * *percentage as f64) as u64; + block_rejection_timeout_steps.insert(rejections_amount, *duration); + } + + // the amount of current rejections (used to eventually modify the timeout) let mut rejections: u64 = 0; - // default timeout + // default timeout (the 0 entry must be always present) let mut rejections_timeout = self .block_rejection_timeout_steps - .range((Included(0), Included(rejections))) - .last() + .get(&rejections) .ok_or_else(|| { NakamotoNodeError::SigningCoordinatorFailure( "Invalid rejection timeout step function definition".into(), @@ -326,6 +331,9 @@ impl SignerCoordinator { })?; // this is used for comparing block_status to identify if it has been changed from the previous event let mut block_status_tracker = BlockStatus::default(); + + // this is used to track the start of the waiting cycle + let mut rejections_timer = Instant::now(); loop { // At every iteration wait for the block_status. // Exit when the amount of confirmations/rejections reaches the threshold (or until timeout) @@ -334,7 +342,7 @@ impl SignerCoordinator { block_signer_sighash, &mut block_status_tracker, rejections_timer, - *rejections_timeout.1, + *rejections_timeout, EVENT_RECEIVER_POLL, )? 
{ Some(status) => status, @@ -368,10 +376,10 @@ impl SignerCoordinator { return Err(NakamotoNodeError::BurnchainTipChanged); } - if rejections_timer.elapsed() > *rejections_timeout.1 { + if rejections_timer.elapsed() > *rejections_timeout { warn!("Timed out while waiting for responses from signers"; "elapsed" => rejections_timer.elapsed().as_secs(), - "rejections_timeout" => rejections_timeout.1.as_secs(), + "rejections_timeout" => rejections_timeout.as_secs(), "rejections" => rejections, "rejections_threshold" => self.total_weight.saturating_sub(self.weight_threshold) ); @@ -385,20 +393,24 @@ impl SignerCoordinator { }; if rejections != block_status.total_reject_weight as u64 { - rejections_timer = Instant::now(); rejections = block_status.total_reject_weight as u64; - rejections_timeout = self - .block_rejection_timeout_steps - .range((Included(0), Included((rejections as f64 / self.total_weight as f64) as u64))) + let rejections_timeout_tuple = block_rejection_timeout_steps + .range((Included(0), Included(rejections))) .last() .ok_or_else(|| { NakamotoNodeError::SigningCoordinatorFailure( "Invalid rejection timeout step function definition".into(), ) })?; + rejections_timeout = rejections_timeout_tuple.1; + info!("Number of received rejections updated, resetting timeout"; + "rejections" => rejections, + "rejections_timeout" => rejections_timeout.as_secs(), + "rejections_threshold" => self.total_weight.saturating_sub(self.weight_threshold)); + rejections_timer = Instant::now(); #[cfg(test)] - BLOCK_REJECTIONS_CURRENT_TIMEOUT.set(*rejections_timeout.1); + BLOCK_REJECTIONS_CURRENT_TIMEOUT.set(*rejections_timeout); } if block_status @@ -418,10 +430,10 @@ impl SignerCoordinator { "block_signer_sighash" => %block_signer_sighash, ); return Ok(block_status.gathered_signatures.values().cloned().collect()); - } else if rejections_timer.elapsed() > *rejections_timeout.1 { + } else if rejections_timer.elapsed() > *rejections_timeout { warn!("Timed out while waiting for responses from signers"; "elapsed" => rejections_timer.elapsed().as_secs(), - "rejections_timeout" => rejections_timeout.1.as_secs(), + "rejections_timeout" => rejections_timeout.as_secs(), "rejections" => rejections, "rejections_threshold" => self.total_weight.saturating_sub(self.weight_threshold) ); diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index ba10f9e792..b603a887a6 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -7813,7 +7813,7 @@ fn block_validation_check_rejection_timeout_heuristic() { } info!("------------------------- Test Setup -------------------------"); - let num_signers = 5; + let num_signers = 20; let timeout = Duration::from_secs(30); let sender_sk = Secp256k1PrivateKey::new(); let sender_addr = tests::to_addr(&sender_sk); @@ -7828,8 +7828,22 @@ fn block_validation_check_rejection_timeout_heuristic() { }, |config| { config.miner.block_rejection_timeout_steps.clear(); - config.miner.block_rejection_timeout_steps.insert(0, Duration::from_secs(600)); - config.miner.block_rejection_timeout_steps.insert(3, Duration::from_secs(17)); + config + .miner + .block_rejection_timeout_steps + .insert(0, Duration::from_secs(123)); + config + .miner + .block_rejection_timeout_steps + .insert(10, Duration::from_secs(20)); + config + .miner + .block_rejection_timeout_steps + .insert(15, Duration::from_secs(10)); + config + .miner + .block_rejection_timeout_steps + .insert(20, Duration::from_secs(99)); }, None, None, 
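[Editorial aside, standalone sketch; not part of the patch series.] The test above keys `block_rejection_timeout_steps` by the percentage of rejecting signer weight, while the coordinator first converts each percentage into an absolute weight threshold and then picks the active timeout with a `BTreeMap` range lookup (see the `rejections_amount` conversion and the `range((Included(0), Included(rejections)))` lookup in the PATCH 18 coordinator hunks above). The sketch below replays that arithmetic for the values used in this test, assuming 20 signers with a weight of 1 each, and checks the expected 123/20/10/99-second steps.

use std::collections::{BTreeMap, HashMap};
use std::ops::Bound::Included;
use std::time::Duration;

fn main() {
    let total_weight: u32 = 20;

    // Configuration as written in the test: rejection percentage -> timeout.
    let mut config_steps = HashMap::new();
    config_steps.insert(0u32, Duration::from_secs(123));
    config_steps.insert(10, Duration::from_secs(20));
    config_steps.insert(15, Duration::from_secs(10));
    config_steps.insert(20, Duration::from_secs(99));

    // Same conversion as the coordinator: percentage -> absolute rejecting weight.
    // With total_weight = 20 the keys become 0, 2, 3 and 4.
    let mut steps = BTreeMap::new();
    for (percentage, duration) in &config_steps {
        let rejections_amount =
            ((f64::from(total_weight) / 100.0) * f64::from(*percentage)) as u64;
        steps.insert(rejections_amount, *duration);
    }

    // Same lookup as the coordinator: the highest step not exceeding the
    // currently observed rejecting weight (the 0 step is always present).
    let active = |rejections: u64| {
        *steps
            .range((Included(0), Included(rejections)))
            .last()
            .expect("the 0 step is always present")
            .1
    };

    assert_eq!(active(1), Duration::from_secs(123)); // below the 10% step
    assert_eq!(active(2), Duration::from_secs(20)); // 2/20 = 10%
    assert_eq!(active(3), Duration::from_secs(10)); // 15% step
    assert_eq!(active(4), Duration::from_secs(99)); // 20% step
}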
@@ -7849,13 +7863,8 @@ fn block_validation_check_rejection_timeout_heuristic() { let blocks_before = test_observer::get_mined_nakamoto_blocks().len(); - TEST_REJECT_ALL_BLOCK_PROPOSAL.set(vec![all_signers[4]]); - TEST_IGNORE_ALL_BLOCK_PROPOSALS.set(vec![ - all_signers[0], - all_signers[1], - all_signers[2], - all_signers[3], - ]); + TEST_REJECT_ALL_BLOCK_PROPOSAL.set(vec![all_signers[19]]); + TEST_IGNORE_ALL_BLOCK_PROPOSALS.set(all_signers[0..19].to_vec()); next_block_and( &mut signer_test.running_nodes.btc_regtest_controller, @@ -7865,17 +7874,19 @@ fn block_validation_check_rejection_timeout_heuristic() { .unwrap(); signer_test - .wait_for_block_rejections(timeout.as_secs(), &[all_signers[4]]) + .wait_for_block_rejections(timeout.as_secs(), &[all_signers[19]]) .unwrap(); - assert_eq!(BLOCK_REJECTIONS_CURRENT_TIMEOUT.get().as_secs(), 551); + thread::sleep(Duration::from_secs(3)); + + assert_eq!(BLOCK_REJECTIONS_CURRENT_TIMEOUT.get().as_secs(), 123); info!("------------------------- Check Rejections-based timeout with 2 rejections -------------------------"); let blocks_before = test_observer::get_mined_nakamoto_blocks().len(); - TEST_REJECT_ALL_BLOCK_PROPOSAL.set(vec![all_signers[3], all_signers[4]]); - TEST_IGNORE_ALL_BLOCK_PROPOSALS.set(vec![all_signers[0], all_signers[1], all_signers[2]]); + TEST_REJECT_ALL_BLOCK_PROPOSAL.set(vec![all_signers[18], all_signers[19]]); + TEST_IGNORE_ALL_BLOCK_PROPOSALS.set(all_signers[0..18].to_vec()); next_block_and( &mut signer_test.running_nodes.btc_regtest_controller, @@ -7885,10 +7896,72 @@ fn block_validation_check_rejection_timeout_heuristic() { .unwrap(); signer_test - .wait_for_block_rejections(timeout.as_secs(), &[all_signers[3], all_signers[4]]) + .wait_for_block_rejections(timeout.as_secs(), &[all_signers[18], all_signers[19]]) .unwrap(); - assert_eq!(BLOCK_REJECTIONS_CURRENT_TIMEOUT.get().as_secs(), 404); + thread::sleep(Duration::from_secs(3)); + + assert_eq!(BLOCK_REJECTIONS_CURRENT_TIMEOUT.get().as_secs(), 20); + + info!("------------------------- Check Rejections-based timeout with 3 rejections -------------------------"); + + let blocks_before = test_observer::get_mined_nakamoto_blocks().len(); + + TEST_REJECT_ALL_BLOCK_PROPOSAL.set(vec![all_signers[17], all_signers[18], all_signers[19]]); + TEST_IGNORE_ALL_BLOCK_PROPOSALS.set(all_signers[0..17].to_vec()); + + next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + 30, + || Ok(test_observer::get_mined_nakamoto_blocks().len() > blocks_before), + ) + .unwrap(); + + signer_test + .wait_for_block_rejections( + timeout.as_secs(), + &[all_signers[17], all_signers[18], all_signers[19]], + ) + .unwrap(); + + thread::sleep(Duration::from_secs(3)); + + assert_eq!(BLOCK_REJECTIONS_CURRENT_TIMEOUT.get().as_secs(), 10); + + info!("------------------------- Check Rejections-based timeout with 4 rejections -------------------------"); + + let blocks_before = test_observer::get_mined_nakamoto_blocks().len(); + + TEST_REJECT_ALL_BLOCK_PROPOSAL.set(vec![ + all_signers[16], + all_signers[17], + all_signers[18], + all_signers[19], + ]); + TEST_IGNORE_ALL_BLOCK_PROPOSALS.set(all_signers[0..16].to_vec()); + + next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + 30, + || Ok(test_observer::get_mined_nakamoto_blocks().len() > blocks_before), + ) + .unwrap(); + + signer_test + .wait_for_block_rejections( + timeout.as_secs(), + &[ + all_signers[16], + all_signers[17], + all_signers[18], + all_signers[19], + ], + ) + .unwrap(); + + thread::sleep(Duration::from_secs(3)); + + 
assert_eq!(BLOCK_REJECTIONS_CURRENT_TIMEOUT.get().as_secs(), 99); // reset reject/ignore TEST_REJECT_ALL_BLOCK_PROPOSAL.set(vec![]); From f2ab500150f6fd87a760cb6399a1076f495eeb2b Mon Sep 17 00:00:00 2001 From: Roberto De Ioris Date: Wed, 29 Jan 2025 15:51:52 +0000 Subject: [PATCH 19/59] fmt-stacks --- stackslib/src/config/mod.rs | 4 +--- testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs | 4 ++-- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/stackslib/src/config/mod.rs b/stackslib/src/config/mod.rs index 2982273825..0e30de5998 100644 --- a/stackslib/src/config/mod.rs +++ b/stackslib/src/config/mod.rs @@ -16,7 +16,7 @@ pub mod chain_data; -use std::collections::{HashMap, HashSet}; +use std::collections::{BTreeMap, HashMap, HashSet}; use std::net::{Ipv4Addr, SocketAddr, ToSocketAddrs}; use std::path::PathBuf; use std::str::FromStr; @@ -36,8 +36,6 @@ use stacks_common::util::get_epoch_time_ms; use stacks_common::util::hash::hex_bytes; use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; -use std::collections::BTreeMap; - use crate::burnchains::affirmation::AffirmationMap; use crate::burnchains::bitcoin::BitcoinNetworkType; use crate::burnchains::{Burnchain, MagicBytes, PoxConstants, BLOCKSTACK_MAGIC_MAINNET}; diff --git a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs index d04b306bfd..1e994b89af 100644 --- a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs @@ -13,6 +13,8 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . +use std::collections::{BTreeMap, HashMap}; +use std::ops::Bound::Included; use std::sync::atomic::AtomicBool; #[cfg(test)] use std::sync::LazyLock; @@ -38,8 +40,6 @@ use stacks::util::secp256k1::MessageSignature; use stacks::util_lib::boot::boot_code_id; #[cfg(test)] use stacks_common::util::tests::TestFlag; -use std::collections::{BTreeMap, HashMap}; -use std::ops::Bound::Included; use super::stackerdb_listener::StackerDBListenerComms; use super::Error as NakamotoNodeError; From 5c0805a73a7e55a187f9911745b7a335ce0d13ef Mon Sep 17 00:00:00 2001 From: Roberto De Ioris Date: Wed, 29 Jan 2025 15:58:01 +0000 Subject: [PATCH 20/59] fixed default rejections timeout --- stackslib/src/config/mod.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/stackslib/src/config/mod.rs b/stackslib/src/config/mod.rs index 0e30de5998..dc79732486 100644 --- a/stackslib/src/config/mod.rs +++ b/stackslib/src/config/mod.rs @@ -2201,8 +2201,7 @@ impl Default for MinerConfig { rejections_timeouts_default_map.insert(0, Duration::from_secs(600)); rejections_timeouts_default_map.insert(10, Duration::from_secs(300)); rejections_timeouts_default_map.insert(20, Duration::from_secs(150)); - rejections_timeouts_default_map.insert(30, Duration::from_secs(60)); - rejections_timeouts_default_map.insert(31, Duration::from_secs(0)); + rejections_timeouts_default_map.insert(30, Duration::from_secs(0)); rejections_timeouts_default_map }, } From f22a9907ba62a76c5a807b677da6a396f1ed4eed Mon Sep 17 00:00:00 2001 From: Roberto De Ioris Date: Wed, 29 Jan 2025 15:59:32 +0000 Subject: [PATCH 21/59] improved comment for rejections timeout --- testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs 
b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs index 1e994b89af..a62efb9570 100644 --- a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs @@ -80,7 +80,7 @@ pub struct SignerCoordinator { /// Rather, this burn block is used to determine whether or not a new /// burn block has arrived since this thread started. burn_tip_at_start: ConsensusHash, - /// + /// The tiemout configuration based on the percentage of rejections block_rejection_timeout_steps: HashMap, } From 4f4b58481dc43ba540e87b4d66376323cb583531 Mon Sep 17 00:00:00 2001 From: Roberto De Ioris Date: Wed, 29 Jan 2025 16:05:18 +0000 Subject: [PATCH 22/59] fixed typo --- testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs index a62efb9570..32c428534f 100644 --- a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs @@ -80,7 +80,7 @@ pub struct SignerCoordinator { /// Rather, this burn block is used to determine whether or not a new /// burn block has arrived since this thread started. burn_tip_at_start: ConsensusHash, - /// The tiemout configuration based on the percentage of rejections + /// The timeout configuration based on the percentage of rejections block_rejection_timeout_steps: HashMap, } From 463839f3f8a0ab12ffed52673522ec0f288801bd Mon Sep 17 00:00:00 2001 From: Roberto De Ioris Date: Wed, 29 Jan 2025 16:08:24 +0000 Subject: [PATCH 23/59] ensure 0 key is specified for rejections timeout --- stackslib/src/config/mod.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/stackslib/src/config/mod.rs b/stackslib/src/config/mod.rs index dc79732486..7897730d70 100644 --- a/stackslib/src/config/mod.rs +++ b/stackslib/src/config/mod.rs @@ -2749,9 +2749,12 @@ impl MinerConfigFile { for (slice, seconds) in block_rejection_timeout_items.iter() { match slice.parse::() { Ok(slice_slot) => rejection_timeout_durations.insert(slice_slot, Duration::from_secs(*seconds)), - Err(e) => panic!("block_rejection_timeout_items keys must be unsigned integers: {}", e) + Err(e) => panic!("block_rejection_timeout_steps keys must be unsigned integers: {}", e) }; } + if !rejection_timeout_durations.contains_key(&0) { + panic!("block_rejection_timeout_steps requires a definition for the '0' key/step"); + } rejection_timeout_durations } else{ miner_default_config.block_rejection_timeout_steps From 0e7461c8c24455028841c4d19198117b97fa707d Mon Sep 17 00:00:00 2001 From: Roberto De Ioris Date: Wed, 29 Jan 2025 16:23:48 +0000 Subject: [PATCH 24/59] message typo --- testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs index 32c428534f..096f9eb74e 100644 --- a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs @@ -384,7 +384,7 @@ impl SignerCoordinator { "rejections_threshold" => self.total_weight.saturating_sub(self.weight_threshold) ); return Err(NakamotoNodeError::SigningCoordinatorFailure( - "Gave up while tried reaching the threshold".into(), + "Gave up while trying reaching the threshold".into(), )); } @@ -438,7 
+438,7 @@ impl SignerCoordinator { "rejections_threshold" => self.total_weight.saturating_sub(self.weight_threshold) ); return Err(NakamotoNodeError::SigningCoordinatorFailure( - "Gave up while tried reaching the threshold".into(), + "Gave up while trying reaching the threshold".into(), )); } else { continue; From 420f861f7e6acc8cd9bd2d07899254198ec16a14 Mon Sep 17 00:00:00 2001 From: Roberto De Ioris Date: Thu, 30 Jan 2025 10:22:19 +0000 Subject: [PATCH 25/59] use Counters instead of global test vars --- .../src/nakamoto_node/signer_coordinator.rs | 17 ++-- testnet/stacks-node/src/run_loop/neon.rs | 7 +- testnet/stacks-node/src/tests/signer/mod.rs | 5 ++ testnet/stacks-node/src/tests/signer/v0.rs | 80 ++++++++++++++++--- 4 files changed, 86 insertions(+), 23 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs index 096f9eb74e..4132bb4284 100644 --- a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs @@ -16,8 +16,6 @@ use std::collections::{BTreeMap, HashMap}; use std::ops::Bound::Included; use std::sync::atomic::AtomicBool; -#[cfg(test)] -use std::sync::LazyLock; use std::sync::{Arc, Mutex}; use std::thread::JoinHandle; use std::time::{Duration, Instant}; @@ -38,8 +36,6 @@ use stacks::types::chainstate::{StacksBlockId, StacksPrivateKey}; use stacks::util::hash::Sha512Trunc256Sum; use stacks::util::secp256k1::MessageSignature; use stacks::util_lib::boot::boot_code_id; -#[cfg(test)] -use stacks_common::util::tests::TestFlag; use super::stackerdb_listener::StackerDBListenerComms; use super::Error as NakamotoNodeError; @@ -50,11 +46,6 @@ use crate::nakamoto_node::stackerdb_listener::{ use crate::neon::Counters; use crate::Config; -#[cfg(test)] -/// Test-only value for storing the current rejection based timeout -pub static BLOCK_REJECTIONS_CURRENT_TIMEOUT: LazyLock> = - LazyLock::new(TestFlag::default); - /// The state of the signer database listener, used by the miner thread to /// interact with the signer listener. 
pub struct SignerCoordinator { @@ -410,7 +401,13 @@ impl SignerCoordinator { rejections_timer = Instant::now(); #[cfg(test)] - BLOCK_REJECTIONS_CURRENT_TIMEOUT.set(*rejections_timeout); + { + Counters::set( + &counters.naka_miner_current_rejections_timeout_secs, + rejections_timeout.as_secs(), + ); + Counters::set(&counters.naka_miner_current_rejections, rejections); + } } if block_status diff --git a/testnet/stacks-node/src/run_loop/neon.rs b/testnet/stacks-node/src/run_loop/neon.rs index deead51066..174b4d40a6 100644 --- a/testnet/stacks-node/src/run_loop/neon.rs +++ b/testnet/stacks-node/src/run_loop/neon.rs @@ -115,6 +115,11 @@ pub struct Counters { pub naka_signer_pushed_blocks: RunLoopCounter, pub naka_miner_directives: RunLoopCounter, + #[cfg(test)] + pub naka_miner_current_rejections: RunLoopCounter, + #[cfg(test)] + pub naka_miner_current_rejections_timeout_secs: RunLoopCounter, + #[cfg(test)] pub naka_skip_commit_op: TestFlag, } @@ -133,7 +138,7 @@ impl Counters { fn inc(_ctr: &RunLoopCounter) {} #[cfg(test)] - fn set(ctr: &RunLoopCounter, value: u64) { + pub fn set(ctr: &RunLoopCounter, value: u64) { ctr.0.store(value, Ordering::SeqCst); } diff --git a/testnet/stacks-node/src/tests/signer/mod.rs b/testnet/stacks-node/src/tests/signer/mod.rs index 47958e8690..67ad402a86 100644 --- a/testnet/stacks-node/src/tests/signer/mod.rs +++ b/testnet/stacks-node/src/tests/signer/mod.rs @@ -98,6 +98,7 @@ pub struct RunningNodes { pub nakamoto_test_skip_commit_op: TestFlag, pub coord_channel: Arc>, pub conf: NeonConfig, + pub counters: Counters, } /// A test harness for running a v0 or v1 signer integration test @@ -943,6 +944,9 @@ fn setup_stx_btc_node( } = run_loop.counters(); let coord_channel = run_loop.coordinator_channels(); + + let run_loop_counters = run_loop.counters(); + let run_loop_thread = thread::spawn(move || run_loop.start(None, 0)); // Give the run loop some time to start up! 
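[Editorial aside, minimal sketch; not part of the patch series.] The PATCH 25 hunks above replace the test-only global with run-loop counters whose helpers do a real atomic store in test builds and compile to no-ops otherwise (the `#[cfg(not(test))]` variants are visible in the surrounding hunks), so the miner can update them unconditionally. A stripped-down version of that pattern, with a hypothetical `Counter` type standing in for `RunLoopCounter`, might look like this:

use std::sync::atomic::{AtomicU64, Ordering};
use std::sync::Arc;

// Hypothetical stand-in for RunLoopCounter: a shared atomic that tests can poll.
#[derive(Clone, Default)]
pub struct Counter(pub Arc<AtomicU64>);

#[cfg(test)]
pub fn set(ctr: &Counter, value: u64) {
    // Test builds really store the value, so integration tests can assert on it.
    ctr.0.store(value, Ordering::SeqCst);
}

#[cfg(not(test))]
pub fn set(_ctr: &Counter, _value: u64) {
    // Non-test builds compile this to a no-op; call sites need no cfg guards.
}

fn main() {
    let rejections = Counter::default();
    set(&rejections, 4);
    // Prints 0 in a normal build (no-op setter) and 4 when the test variant is compiled in.
    println!("rejections = {}", rejections.0.load(Ordering::SeqCst));
}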
@@ -978,5 +982,6 @@ fn setup_stx_btc_node( nakamoto_miner_directives: naka_miner_directives.0, coord_channel, conf: naka_conf, + counters: run_loop_counters, } } diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index b603a887a6..ec818d34a0 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -76,7 +76,6 @@ use crate::event_dispatcher::{MinedNakamotoBlockEvent, TEST_SKIP_BLOCK_ANNOUNCEM use crate::nakamoto_node::miner::{ TEST_BLOCK_ANNOUNCE_STALL, TEST_BROADCAST_STALL, TEST_MINE_STALL, }; -use crate::nakamoto_node::signer_coordinator::BLOCK_REJECTIONS_CURRENT_TIMEOUT; use crate::nakamoto_node::stackerdb_listener::TEST_IGNORE_SIGNERS; use crate::neon::Counters; use crate::run_loop::boot_nakamoto; @@ -7877,9 +7876,23 @@ fn block_validation_check_rejection_timeout_heuristic() { .wait_for_block_rejections(timeout.as_secs(), &[all_signers[19]]) .unwrap(); - thread::sleep(Duration::from_secs(3)); - - assert_eq!(BLOCK_REJECTIONS_CURRENT_TIMEOUT.get().as_secs(), 123); + wait_for(60, || { + Ok(signer_test + .running_nodes + .counters + .naka_miner_current_rejections + .get() + >= 1) + }) + .unwrap(); + assert_eq!( + signer_test + .running_nodes + .counters + .naka_miner_current_rejections_timeout_secs + .get(), + 123 + ); info!("------------------------- Check Rejections-based timeout with 2 rejections -------------------------"); @@ -7899,9 +7912,23 @@ fn block_validation_check_rejection_timeout_heuristic() { .wait_for_block_rejections(timeout.as_secs(), &[all_signers[18], all_signers[19]]) .unwrap(); - thread::sleep(Duration::from_secs(3)); - - assert_eq!(BLOCK_REJECTIONS_CURRENT_TIMEOUT.get().as_secs(), 20); + wait_for(60, || { + Ok(signer_test + .running_nodes + .counters + .naka_miner_current_rejections + .get() + >= 2) + }) + .unwrap(); + assert_eq!( + signer_test + .running_nodes + .counters + .naka_miner_current_rejections_timeout_secs + .get(), + 20 + ); info!("------------------------- Check Rejections-based timeout with 3 rejections -------------------------"); @@ -7924,9 +7951,24 @@ fn block_validation_check_rejection_timeout_heuristic() { ) .unwrap(); - thread::sleep(Duration::from_secs(3)); + wait_for(60, || { + Ok(signer_test + .running_nodes + .counters + .naka_miner_current_rejections + .get() + >= 3) + }) + .unwrap(); - assert_eq!(BLOCK_REJECTIONS_CURRENT_TIMEOUT.get().as_secs(), 10); + assert_eq!( + signer_test + .running_nodes + .counters + .naka_miner_current_rejections_timeout_secs + .get(), + 10 + ); info!("------------------------- Check Rejections-based timeout with 4 rejections -------------------------"); @@ -7959,9 +8001,23 @@ fn block_validation_check_rejection_timeout_heuristic() { ) .unwrap(); - thread::sleep(Duration::from_secs(3)); - - assert_eq!(BLOCK_REJECTIONS_CURRENT_TIMEOUT.get().as_secs(), 99); + wait_for(60, || { + Ok(signer_test + .running_nodes + .counters + .naka_miner_current_rejections + .get() + >= 4) + }) + .unwrap(); + assert_eq!( + signer_test + .running_nodes + .counters + .naka_miner_current_rejections_timeout_secs + .get(), + 99 + ); // reset reject/ignore TEST_REJECT_ALL_BLOCK_PROPOSAL.set(vec![]); From 57f80fb79785e0994e565fb954e3ce628a9b3378 Mon Sep 17 00:00:00 2001 From: Roberto De Ioris Date: Thu, 30 Jan 2025 10:57:46 +0000 Subject: [PATCH 26/59] restored original wait_for_block_status --- .../src/nakamoto_node/signer_coordinator.rs | 18 ++++++++++--- .../src/nakamoto_node/stackerdb_listener.rs | 26 +++++++++---------- 2 files changed, 26 
insertions(+), 18 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs index 4132bb4284..8a05600771 100644 --- a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs @@ -331,12 +331,22 @@ impl SignerCoordinator { // Based on the amount of rejections, eventually modify the timeout. let block_status = match self.stackerdb_comms.wait_for_block_status( block_signer_sighash, - &mut block_status_tracker, - rejections_timer, - *rejections_timeout, EVENT_RECEIVER_POLL, + |status| { + if rejections_timer.elapsed() > *rejections_timeout { + return false; + } + if *status != block_status_tracker { + return false; + } + return true; + }, )? { - Some(status) => status, + Some(status) => { + // keep track of the last status + block_status_tracker = status.clone(); + status + } None => { // If we just received a timeout, we should check if the burnchain // tip has changed or if we received this signed block already in diff --git a/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs b/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs index 92688c0075..5ac31c60d1 100644 --- a/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs +++ b/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs @@ -480,32 +480,30 @@ impl StackerDBListenerComms { /// Get the status for `block` from the Stacker DB listener. /// If the block is not found in the map, return an error. - /// If the block is found, return it. + /// If the block is found, call `condition` to check if the block status + /// satisfies the condition. + /// If the condition is satisfied, return the block status as + /// `Ok(Some(status))`. + /// If the condition is not satisfied, wait for it to be satisfied. /// If the timeout is reached, return `Ok(None)`. 
- pub fn wait_for_block_status( + pub fn wait_for_block_status( &self, block_signer_sighash: &Sha512Trunc256Sum, - block_status_tracker: &mut BlockStatus, - rejections_timer: std::time::Instant, - rejections_timeout: Duration, timeout: Duration, - ) -> Result, NakamotoNodeError> { + condition: F, + ) -> Result, NakamotoNodeError> + where + F: Fn(&BlockStatus) -> bool, + { let (lock, cvar) = &*self.blocks; let blocks = lock.lock().expect("FATAL: failed to lock block status"); let (guard, timeout_result) = cvar .wait_timeout_while(blocks, timeout, |map| { - if rejections_timer.elapsed() > rejections_timeout { - return true; - } let Some(status) = map.get(block_signer_sighash) else { return true; }; - if status != block_status_tracker { - *block_status_tracker = status.clone(); - return false; - } - return true; + condition(status) }) .expect("FATAL: failed to wait on block status cond var"); From d5abfdc0daa73ba8aaf9b67b1bb61c9f0cc17cd4 Mon Sep 17 00:00:00 2001 From: Roberto De Ioris Date: Thu, 30 Jan 2025 11:00:53 +0000 Subject: [PATCH 27/59] removed empty line --- testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs b/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs index 5ac31c60d1..ddaa14a8fd 100644 --- a/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs +++ b/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs @@ -376,7 +376,6 @@ impl StackerDBListener { } }; block.responded_signers.insert(rejected_pubkey); - block.total_reject_weight = block .total_reject_weight .checked_add(signer_entry.weight) From 5269714c1438257d9a32aa57f2c84542864f3fed Mon Sep 17 00:00:00 2001 From: Roberto De Ioris Date: Thu, 30 Jan 2025 11:06:20 +0000 Subject: [PATCH 28/59] updated CHANGELOG --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 226f7b5159..7d9ecb3b16 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -10,6 +10,7 @@ and this project adheres to the versioning scheme outlined in the [README.md](RE ### Changed - Miner will include other transactions in blocks with tenure extend transactions (#5760) +- Add `block_rejection_timeout_steps` to miner configuration for defining rejections-based timeouts while waiting for signers response ## [3.1.0.0.4] From 5a70c4397446a873522a2f3bb2bfbc8b389a6f93 Mon Sep 17 00:00:00 2001 From: Roberto De Ioris Date: Thu, 30 Jan 2025 11:07:00 +0000 Subject: [PATCH 29/59] updated CHANGELOG --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 7d9ecb3b16..57f940fa31 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -10,7 +10,7 @@ and this project adheres to the versioning scheme outlined in the [README.md](RE ### Changed - Miner will include other transactions in blocks with tenure extend transactions (#5760) -- Add `block_rejection_timeout_steps` to miner configuration for defining rejections-based timeouts while waiting for signers response +- Add `block_rejection_timeout_steps` to miner configuration for defining rejections-based timeouts while waiting for signers response (#5705) ## [3.1.0.0.4] From c424212db56a076fa482dabf920c45b4a10a6f30 Mon Sep 17 00:00:00 2001 From: Roberto De Ioris Date: Thu, 30 Jan 2025 11:13:17 +0000 Subject: [PATCH 30/59] Secp256k1PrivateKey::random() instead of new() --- testnet/stacks-node/src/tests/signer/v0.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 2c483c523c..2c98c11e67 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -7899,7 +7899,7 @@ fn block_validation_check_rejection_timeout_heuristic() { info!("------------------------- Test Setup -------------------------"); let num_signers = 20; let timeout = Duration::from_secs(30); - let sender_sk = Secp256k1PrivateKey::new(); + let sender_sk = Secp256k1PrivateKey::random(); let sender_addr = tests::to_addr(&sender_sk); let send_amt = 100; let send_fee = 180; From d71329003e3e697d2e36e9b279bef2d89954f8db Mon Sep 17 00:00:00 2001 From: Roberto De Ioris Date: Thu, 30 Jan 2025 15:30:52 +0000 Subject: [PATCH 31/59] removed useless test attributes --- .../src/nakamoto_node/signer_coordinator.rs | 13 +++++-------- testnet/stacks-node/src/run_loop/neon.rs | 4 +--- 2 files changed, 6 insertions(+), 11 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs index 2de14708f2..ae68c22cc5 100644 --- a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs @@ -410,14 +410,11 @@ impl SignerCoordinator { "rejections_threshold" => self.total_weight.saturating_sub(self.weight_threshold)); rejections_timer = Instant::now(); - #[cfg(test)] - { - Counters::set( - &counters.naka_miner_current_rejections_timeout_secs, - rejections_timeout.as_secs(), - ); - Counters::set(&counters.naka_miner_current_rejections, rejections); - } + Counters::set( + &counters.naka_miner_current_rejections_timeout_secs, + rejections_timeout.as_secs(), + ); + Counters::set(&counters.naka_miner_current_rejections, rejections); } if block_status diff --git a/testnet/stacks-node/src/run_loop/neon.rs b/testnet/stacks-node/src/run_loop/neon.rs index 89973aa9aa..01813b6812 100644 --- a/testnet/stacks-node/src/run_loop/neon.rs +++ b/testnet/stacks-node/src/run_loop/neon.rs @@ -117,9 +117,7 @@ pub struct Counters { pub naka_signer_pushed_blocks: RunLoopCounter, pub naka_miner_directives: RunLoopCounter, - #[cfg(test)] pub naka_miner_current_rejections: RunLoopCounter, - #[cfg(test)] pub naka_miner_current_rejections_timeout_secs: RunLoopCounter, #[cfg(test)] @@ -145,7 +143,7 @@ impl Counters { } #[cfg(not(test))] - fn set(_ctr: &RunLoopCounter, _value: u64) {} + pub fn set(_ctr: &RunLoopCounter, _value: u64) {} pub fn bump_blocks_processed(&self) { Counters::inc(&self.blocks_processed); From adaabe60570acb199e577c11c633eb97078af115 Mon Sep 17 00:00:00 2001 From: Roberto De Ioris Date: Thu, 30 Jan 2025 15:47:49 +0000 Subject: [PATCH 32/59] use u32 for block_rejection_timeout_steps keys --- stackslib/src/config/mod.rs | 8 ++++---- .../src/nakamoto_node/signer_coordinator.rs | 20 +++++++++---------- 2 files changed, 14 insertions(+), 14 deletions(-) diff --git a/stackslib/src/config/mod.rs b/stackslib/src/config/mod.rs index 3fa2d3a005..b68e55bd03 100644 --- a/stackslib/src/config/mod.rs +++ b/stackslib/src/config/mod.rs @@ -2156,7 +2156,7 @@ pub struct MinerConfig { /// Duration to wait before attempting to issue a tenure extend pub tenure_timeout: Duration, /// Define the timeout to apply while waiting for signers responses, based on the amount of rejections - pub block_rejection_timeout_steps: HashMap, + pub block_rejection_timeout_steps: HashMap, } impl Default for MinerConfig { @@ -2196,7 +2196,7 @@ impl Default for MinerConfig 
{ tenure_extend_poll_secs: Duration::from_secs(DEFAULT_TENURE_EXTEND_POLL_SECS), tenure_timeout: Duration::from_secs(DEFAULT_TENURE_TIMEOUT_SECS), block_rejection_timeout_steps: { - let mut rejections_timeouts_default_map = HashMap::::new(); + let mut rejections_timeouts_default_map = HashMap::::new(); rejections_timeouts_default_map.insert(0, Duration::from_secs(600)); rejections_timeouts_default_map.insert(10, Duration::from_secs(300)); rejections_timeouts_default_map.insert(20, Duration::from_secs(150)); @@ -2744,9 +2744,9 @@ impl MinerConfigFile { tenure_timeout: self.tenure_timeout_secs.map(Duration::from_secs).unwrap_or(miner_default_config.tenure_timeout), block_rejection_timeout_steps: { if let Some(block_rejection_timeout_items) = self.block_rejection_timeout_steps { - let mut rejection_timeout_durations = HashMap::::new(); + let mut rejection_timeout_durations = HashMap::::new(); for (slice, seconds) in block_rejection_timeout_items.iter() { - match slice.parse::() { + match slice.parse::() { Ok(slice_slot) => rejection_timeout_durations.insert(slice_slot, Duration::from_secs(*seconds)), Err(e) => panic!("block_rejection_timeout_steps keys must be unsigned integers: {}", e) }; diff --git a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs index ae68c22cc5..1e2dba7f05 100644 --- a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs @@ -72,7 +72,7 @@ pub struct SignerCoordinator { /// burn block has arrived since this thread started. burn_tip_at_start: ConsensusHash, /// The timeout configuration based on the percentage of rejections - block_rejection_timeout_steps: HashMap, + block_rejection_timeout_steps: HashMap, } impl SignerCoordinator { @@ -305,21 +305,21 @@ impl SignerCoordinator { let mut block_rejection_timeout_steps = BTreeMap::::new(); for (percentage, duration) in self.block_rejection_timeout_steps.iter() { let rejections_amount = - ((self.total_weight as f64 / 100.0) * *percentage as f64) as u64; + ((f64::from(self.total_weight) / 100.0) * f64::from(*percentage)) as u64; block_rejection_timeout_steps.insert(rejections_amount, *duration); } // the amount of current rejections (used to eventually modify the timeout) let mut rejections: u64 = 0; // default timeout (the 0 entry must be always present) - let mut rejections_timeout = self - .block_rejection_timeout_steps - .get(&rejections) - .ok_or_else(|| { - NakamotoNodeError::SigningCoordinatorFailure( - "Invalid rejection timeout step function definition".into(), - ) - })?; + let mut rejections_timeout = + block_rejection_timeout_steps + .get(&rejections) + .ok_or_else(|| { + NakamotoNodeError::SigningCoordinatorFailure( + "Invalid rejection timeout step function definition".into(), + ) + })?; // this is used for comparing block_status to identify if it has been changed from the previous event let mut block_status_tracker = BlockStatus::default(); From 7cfbfddb8b0034e40b9fd4edf766655d76260868 Mon Sep 17 00:00:00 2001 From: Roberto De Ioris Date: Thu, 30 Jan 2025 15:54:04 +0000 Subject: [PATCH 33/59] refactored block_rejection_timeout_steps percentage setup --- .../src/nakamoto_node/signer_coordinator.rs | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs index 1e2dba7f05..dfddd0a7b2 100644 --- 
a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs @@ -279,12 +279,21 @@ impl SignerCoordinator { } } + // build a BTreeMap of the various timeout steps + let mut block_rejection_timeout_steps = BTreeMap::::new(); + for (percentage, duration) in self.block_rejection_timeout_steps.iter() { + let rejections_amount = + ((f64::from(self.total_weight) / 100.0) * f64::from(*percentage)) as u64; + block_rejection_timeout_steps.insert(rejections_amount, *duration); + } + self.get_block_status( &block.header.signer_signature_hash(), &block.block_id(), chain_state, sortdb, counters, + &block_rejection_timeout_steps, ) } @@ -300,15 +309,8 @@ impl SignerCoordinator { chain_state: &mut StacksChainState, sortdb: &SortitionDB, counters: &Counters, + block_rejection_timeout_steps: &BTreeMap, ) -> Result, NakamotoNodeError> { - // build a BTreeMap of the various timeout steps - let mut block_rejection_timeout_steps = BTreeMap::::new(); - for (percentage, duration) in self.block_rejection_timeout_steps.iter() { - let rejections_amount = - ((f64::from(self.total_weight) / 100.0) * f64::from(*percentage)) as u64; - block_rejection_timeout_steps.insert(rejections_amount, *duration); - } - // the amount of current rejections (used to eventually modify the timeout) let mut rejections: u64 = 0; // default timeout (the 0 entry must be always present) From d4171d7cce23cee936319a97fe40b965b90470c4 Mon Sep 17 00:00:00 2001 From: Roberto De Ioris Date: Thu, 30 Jan 2025 16:05:10 +0000 Subject: [PATCH 34/59] do not reset rejections_timer on block_status updates --- testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs index dfddd0a7b2..eb213710e2 100644 --- a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs @@ -326,7 +326,7 @@ impl SignerCoordinator { let mut block_status_tracker = BlockStatus::default(); // this is used to track the start of the waiting cycle - let mut rejections_timer = Instant::now(); + let rejections_timer = Instant::now(); loop { // At every iteration wait for the block_status. // Exit when the amount of confirmations/rejections reaches the threshold (or until timeout) @@ -410,7 +410,6 @@ impl SignerCoordinator { "rejections" => rejections, "rejections_timeout" => rejections_timeout.as_secs(), "rejections_threshold" => self.total_weight.saturating_sub(self.weight_threshold)); - rejections_timer = Instant::now(); Counters::set( &counters.naka_miner_current_rejections_timeout_secs, From fbc3b7d34124acbc2b3d9ced95dacf5a475c22b5 Mon Sep 17 00:00:00 2001 From: Roberto De Ioris Date: Thu, 30 Jan 2025 17:38:08 +0000 Subject: [PATCH 35/59] moved block_rejection_timeout_steps computation directly in SignerCoordinator::new() --- .../src/nakamoto_node/signer_coordinator.rs | 43 +++++++++---------- 1 file changed, 21 insertions(+), 22 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs index eb213710e2..1fdb609ad1 100644 --- a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs @@ -13,7 +13,7 @@ // You should have received a copy of the GNU General Public License // along with this program. 
If not, see . -use std::collections::{BTreeMap, HashMap}; +use std::collections::BTreeMap; use std::ops::Bound::Included; use std::sync::atomic::AtomicBool; use std::sync::{Arc, Mutex}; @@ -72,7 +72,7 @@ pub struct SignerCoordinator { /// burn block has arrived since this thread started. burn_tip_at_start: ConsensusHash, /// The timeout configuration based on the percentage of rejections - block_rejection_timeout_steps: HashMap, + block_rejection_timeout_steps: BTreeMap, } impl SignerCoordinator { @@ -108,6 +108,14 @@ impl SignerCoordinator { let miners_contract_id = boot_code_id(MINERS_NAME, is_mainnet); let miners_session = StackerDBSession::new(&rpc_socket.to_string(), miners_contract_id); + // build a BTreeMap of the various timeout steps + let mut block_rejection_timeout_steps = BTreeMap::::new(); + for (percentage, duration) in config.miner.block_rejection_timeout_steps.iter() { + let rejections_amount = + ((f64::from(listener.total_weight) / 100.0) * f64::from(*percentage)) as u64; + block_rejection_timeout_steps.insert(rejections_amount, *duration); + } + let mut sc = Self { message_key, is_mainnet, @@ -118,7 +126,7 @@ impl SignerCoordinator { keep_running, listener_thread: None, burn_tip_at_start: burn_tip_at_start.clone(), - block_rejection_timeout_steps: config.miner.block_rejection_timeout_steps.clone(), + block_rejection_timeout_steps, }; // Spawn the signer DB listener thread @@ -279,21 +287,12 @@ impl SignerCoordinator { } } - // build a BTreeMap of the various timeout steps - let mut block_rejection_timeout_steps = BTreeMap::::new(); - for (percentage, duration) in self.block_rejection_timeout_steps.iter() { - let rejections_amount = - ((f64::from(self.total_weight) / 100.0) * f64::from(*percentage)) as u64; - block_rejection_timeout_steps.insert(rejections_amount, *duration); - } - self.get_block_status( &block.header.signer_signature_hash(), &block.block_id(), chain_state, sortdb, counters, - &block_rejection_timeout_steps, ) } @@ -309,19 +308,18 @@ impl SignerCoordinator { chain_state: &mut StacksChainState, sortdb: &SortitionDB, counters: &Counters, - block_rejection_timeout_steps: &BTreeMap, ) -> Result, NakamotoNodeError> { // the amount of current rejections (used to eventually modify the timeout) let mut rejections: u64 = 0; // default timeout (the 0 entry must be always present) - let mut rejections_timeout = - block_rejection_timeout_steps - .get(&rejections) - .ok_or_else(|| { - NakamotoNodeError::SigningCoordinatorFailure( - "Invalid rejection timeout step function definition".into(), - ) - })?; + let mut rejections_timeout = self + .block_rejection_timeout_steps + .get(&rejections) + .ok_or_else(|| { + NakamotoNodeError::SigningCoordinatorFailure( + "Invalid rejection timeout step function definition".into(), + ) + })?; // this is used for comparing block_status to identify if it has been changed from the previous event let mut block_status_tracker = BlockStatus::default(); @@ -397,7 +395,8 @@ impl SignerCoordinator { if rejections != block_status.total_reject_weight as u64 { rejections = block_status.total_reject_weight as u64; - let rejections_timeout_tuple = block_rejection_timeout_steps + let rejections_timeout_tuple = self + .block_rejection_timeout_steps .range((Included(0), Included(rejections))) .last() .ok_or_else(|| { From 790c1e1db2bc85accccb4ba0b1fa500de2a30fdb Mon Sep 17 00:00:00 2001 From: Roberto De Ioris Date: Fri, 31 Jan 2025 10:39:36 +0000 Subject: [PATCH 36/59] merged with develop, reintroduces old logic for condition variables, improved 
rejections comparison --- .../src/nakamoto_node/signer_coordinator.rs | 19 +++++++------------ .../src/nakamoto_node/stackerdb_listener.rs | 16 ++++++++++++---- testnet/stacks-node/src/tests/signer/mod.rs | 4 ---- 3 files changed, 19 insertions(+), 20 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs index 1fdb609ad1..ec827033ba 100644 --- a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs @@ -40,9 +40,7 @@ use stacks::util_lib::boot::boot_code_id; use super::stackerdb_listener::StackerDBListenerComms; use super::Error as NakamotoNodeError; use crate::event_dispatcher::StackerDBChannel; -use crate::nakamoto_node::stackerdb_listener::{ - BlockStatus, StackerDBListener, EVENT_RECEIVER_POLL, -}; +use crate::nakamoto_node::stackerdb_listener::{StackerDBListener, EVENT_RECEIVER_POLL}; use crate::neon::Counters; use crate::Config; @@ -320,8 +318,6 @@ impl SignerCoordinator { "Invalid rejection timeout step function definition".into(), ) })?; - // this is used for comparing block_status to identify if it has been changed from the previous event - let mut block_status_tracker = BlockStatus::default(); // this is used to track the start of the waiting cycle let rejections_timer = Instant::now(); @@ -333,20 +329,19 @@ impl SignerCoordinator { block_signer_sighash, EVENT_RECEIVER_POLL, |status| { + // rejections-based timeout expired? if rejections_timer.elapsed() > *rejections_timeout { return false; } - if *status != block_status_tracker { + // number or rejections changed? + if status.total_reject_weight as u64 != rejections { return false; } - return true; + // enough signatures? + return status.total_weight_signed < self.weight_threshold; }, )? 
{ - Some(status) => { - // keep track of the last status - block_status_tracker = status.clone(); - status - } + Some(status) => status, None => { // If we just received a timeout, we should check if the burnchain // tip has changed or if we received this signed block already in diff --git a/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs b/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs index ddaa14a8fd..cf9ce4b6d7 100644 --- a/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs +++ b/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs @@ -337,8 +337,10 @@ impl StackerDBListener { block.gathered_signatures.insert(slot_id, signature); block.responded_signers.insert(signer_pubkey); - // Signal to anyone waiting on this block that we have a new status - cvar.notify_all(); + if block.total_weight_signed >= self.weight_threshold { + // Signal to anyone waiting on this block that we have enough signatures + cvar.notify_all(); + } // Update the idle timestamp for this signer self.update_idle_timestamp( @@ -394,8 +396,14 @@ impl StackerDBListener { "server_version" => rejected_data.metadata.server_version, ); - // Signal to anyone waiting on this block that we have a new status - cvar.notify_all(); + if block + .total_reject_weight + .saturating_add(self.weight_threshold) + > self.total_weight + { + // Signal to anyone waiting on this block that we have enough rejections + cvar.notify_all(); + } // Update the idle timestamp for this signer self.update_idle_timestamp( diff --git a/testnet/stacks-node/src/tests/signer/mod.rs b/testnet/stacks-node/src/tests/signer/mod.rs index 836a9eee0a..a68a4c77fb 100644 --- a/testnet/stacks-node/src/tests/signer/mod.rs +++ b/testnet/stacks-node/src/tests/signer/mod.rs @@ -86,7 +86,6 @@ pub struct RunningNodes { pub counters: Counters, pub coord_channel: Arc>, pub conf: NeonConfig, - pub counters: Counters, } /// A test harness for running a v0 or v1 signer integration test @@ -939,8 +938,6 @@ fn setup_stx_btc_node( let coord_channel = run_loop.coordinator_channels(); - let run_loop_counters = run_loop.counters(); - let run_loop_thread = thread::spawn(move || run_loop.start(None, 0)); // Give the run loop some time to start up! @@ -978,6 +975,5 @@ fn setup_stx_btc_node( coord_channel, counters, conf: naka_conf, - counters: run_loop_counters, } } From 54aed16067a48c71dada0cd1fa8067d0866b143a Mon Sep 17 00:00:00 2001 From: Roberto De Ioris Date: Fri, 31 Jan 2025 10:41:03 +0000 Subject: [PATCH 37/59] rollback BlockStatus Default and PartialEq --- testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs b/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs index cf9ce4b6d7..834c59fa95 100644 --- a/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs +++ b/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs @@ -50,7 +50,7 @@ pub static TEST_IGNORE_SIGNERS: LazyLock> = LazyLock::new(TestFla /// waking up to check timeouts? 
pub static EVENT_RECEIVER_POLL: Duration = Duration::from_millis(500); -#[derive(Debug, Clone, Default, PartialEq)] +#[derive(Debug, Clone)] pub struct BlockStatus { pub responded_signers: HashSet, pub gathered_signatures: BTreeMap, From fd09625cd4febaa716da8e024ea1cb0d08aa2352 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Fri, 31 Jan 2025 07:40:26 -0600 Subject: [PATCH 38/59] test: fix flake in multiple_miners_empty_sortition --- testnet/stacks-node/src/tests/signer/v0.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 2e71cb8910..30b61477cd 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -11737,13 +11737,17 @@ fn multiple_miners_empty_sortition() { // lets mine a btc flash block let rl2_commits_before = rl2_commits.load(Ordering::SeqCst); let rl1_commits_before = rl1_commits.load(Ordering::SeqCst); + let info_before = get_chain_info(&conf); + signer_test .running_nodes .btc_regtest_controller .build_next_block(2); wait_for(60, || { - Ok(rl2_commits.load(Ordering::SeqCst) > rl2_commits_before + let info = get_chain_info(&conf); + Ok(info.burn_block_height >= 2 + info_before.burn_block_height + && rl2_commits.load(Ordering::SeqCst) > rl2_commits_before && rl1_commits.load(Ordering::SeqCst) > rl1_commits_before) }) .unwrap(); From 3ea5c858fa3d089095cd45bf737141c4e6c646c5 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Fri, 31 Jan 2025 09:15:06 -0600 Subject: [PATCH 39/59] test: refactor nakamoto tests to use better wait-for-commit logic --- .../src/tests/nakamoto_integrations.rs | 374 +++++------------- testnet/stacks-node/src/tests/signer/mod.rs | 13 +- testnet/stacks-node/src/tests/signer/v0.rs | 49 ++- 3 files changed, 120 insertions(+), 316 deletions(-) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 71775742d0..0c60902fa0 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -728,14 +728,14 @@ pub fn next_block_and_process_new_stacks_block( pub fn next_block_and_mine_commit( btc_controller: &mut BitcoinRegtestController, timeout_secs: u64, - coord_channels: &Arc>, - commits_submitted: &Arc, + node_conf: &Config, + node_counters: &Counters, ) -> Result<(), String> { next_block_and_wait_for_commits( btc_controller, timeout_secs, - &[coord_channels], - &[commits_submitted], + &[node_conf], + &[node_counters], true, ) } @@ -745,14 +745,14 @@ pub fn next_block_and_mine_commit( pub fn next_block_and_commits_only( btc_controller: &mut BitcoinRegtestController, timeout_secs: u64, - coord_channels: &Arc>, - commits_submitted: &Arc, + node_conf: &Config, + node_counters: &Counters, ) -> Result<(), String> { next_block_and_wait_for_commits( btc_controller, timeout_secs, - &[coord_channels], - &[commits_submitted], + &[node_conf], + &[node_counters], false, ) } @@ -765,98 +765,48 @@ pub fn next_block_and_commits_only( pub fn next_block_and_wait_for_commits( btc_controller: &mut BitcoinRegtestController, timeout_secs: u64, - coord_channels: &[&Arc>], - commits_submitted: &[&Arc], + node_confs: &[&Config], + node_counters: &[&Counters], wait_for_stacks_block: bool, ) -> Result<(), String> { - let commits_submitted: Vec<_> = commits_submitted.to_vec(); - let blocks_processed_before: Vec<_> = coord_channels + let infos_before: Vec<_> = 
node_confs.iter().map(|c| get_chain_info(c)).collect(); + let burn_ht_before = infos_before .iter() - .map(|x| { - x.lock() - .expect("Mutex poisoned") - .get_stacks_blocks_processed() - }) + .map(|info| info.burn_block_height) + .max() + .unwrap(); + let stacks_ht_before = infos_before + .iter() + .map(|info| info.stacks_tip_height) + .max() + .unwrap(); + let last_commit_burn_hts: Vec<_> = node_counters + .iter() + .map(|c| &c.naka_submitted_commit_last_burn_height) .collect(); - let commits_before: Vec<_> = commits_submitted + let last_commit_stacks_hts: Vec<_> = node_counters .iter() - .map(|x| x.load(Ordering::SeqCst)) + .map(|c| &c.naka_submitted_commit_last_stacks_tip) .collect(); - let mut block_processed_time: Vec> = vec![None; commits_before.len()]; - let mut commit_sent_time: Vec> = vec![None; commits_before.len()]; next_block_and(btc_controller, timeout_secs, || { - for i in 0..commits_submitted.len() { - let commits_sent = commits_submitted[i].load(Ordering::SeqCst); - let blocks_processed = coord_channels[i] - .lock() - .expect("Mutex poisoned") - .get_stacks_blocks_processed(); - let now = Instant::now(); - if blocks_processed > blocks_processed_before[i] && block_processed_time[i].is_none() { - block_processed_time[i].replace(now); - } - if commits_sent > commits_before[i] && commit_sent_time[i].is_none() { - commit_sent_time[i].replace(now); - } - } - + let burn_height_committed_to = last_commit_burn_hts.iter().all(|last_commit_burn_height| { + last_commit_burn_height.load(Ordering::SeqCst) > burn_ht_before + }); if !wait_for_stacks_block { - for i in 0..commits_submitted.len() { - // just wait for the commit - let commits_sent = commits_submitted[i].load(Ordering::SeqCst); - if commits_sent <= commits_before[i] { - return Ok(false); - } - - // if two commits have been sent, one of them must have been after - if commits_sent >= commits_before[i] + 1 { - continue; - } - return Ok(false); - } - return Ok(true); - } - - // waiting for both commit and stacks block - for i in 0..commits_submitted.len() { - let blocks_processed = coord_channels[i] - .lock() - .expect("Mutex poisoned") - .get_stacks_blocks_processed(); - let commits_sent = commits_submitted[i].load(Ordering::SeqCst); - - if blocks_processed > blocks_processed_before[i] { - // either we don't care about the stacks block count, or the block count advanced. - // Check the block-commits. 
- let block_processed_time = block_processed_time[i] - .as_ref() - .ok_or("TEST-ERROR: Processed block time wasn't set")?; - if commits_sent <= commits_before[i] { - return Ok(false); - } - let commit_sent_time = commit_sent_time[i] - .as_ref() - .ok_or("TEST-ERROR: Processed commit time wasn't set")?; - // try to ensure the commit was sent after the block was processed - if commit_sent_time > block_processed_time { - continue; - } - // if two commits have been sent, one of them must have been after - if commits_sent >= commits_before[i] + 2 { - continue; - } - // otherwise, just timeout if the commit was sent and its been long enough - // for a new commit pass to have occurred - if block_processed_time.elapsed() > Duration::from_secs(10) { - continue; - } - return Ok(false); - } else { + Ok(burn_height_committed_to) + } else { + if !burn_height_committed_to { return Ok(false); } + let stacks_tip_committed_to = + last_commit_stacks_hts + .iter() + .all(|last_commit_stacks_height| { + last_commit_stacks_height.load(Ordering::SeqCst) > stacks_ht_before + }); + return Ok(stacks_tip_committed_to); } - Ok(true) }) } @@ -1541,6 +1491,7 @@ fn simple_neon_integration() { naka_proposed_blocks: proposals_submitted, .. } = run_loop.counters(); + let node_counters = run_loop.counters(); let coord_channel = run_loop.coordinator_channels(); @@ -1598,13 +1549,8 @@ fn simple_neon_integration() { // Mine 15 nakamoto tenures for _i in 0..15 { - next_block_and_mine_commit( - &mut btc_regtest_controller, - 60, - &coord_channel, - &commits_submitted, - ) - .unwrap(); + next_block_and_mine_commit(&mut btc_regtest_controller, 60, &naka_conf, &node_counters) + .unwrap(); } // Submit a TX @@ -1652,13 +1598,8 @@ fn simple_neon_integration() { // Mine 15 more nakamoto tenures for _i in 0..15 { - next_block_and_mine_commit( - &mut btc_regtest_controller, - 60, - &coord_channel, - &commits_submitted, - ) - .unwrap(); + next_block_and_mine_commit(&mut btc_regtest_controller, 60, &naka_conf, &node_counters) + .unwrap(); } // load the chain tip, and assert that it is a nakamoto block and at least 30 blocks have advanced in epoch 3 @@ -1794,17 +1735,17 @@ fn restarting_miner() { naka_proposed_blocks: proposals_submitted, .. } = run_loop.counters(); + let rl1_counters = run_loop.counters(); let coord_channel = run_loop.coordinator_channels(); let mut run_loop_2 = boot_nakamoto::BootRunLoop::new(naka_conf.clone()).unwrap(); let _run_loop_2_stopper = run_loop.get_termination_switch(); let Counters { blocks_processed: blocks_processed_2, - naka_submitted_commits: commits_submitted_2, naka_proposed_blocks: proposals_submitted_2, .. 
} = run_loop_2.counters(); - let coord_channel_2 = run_loop_2.coordinator_channels(); + let rl2_counters = run_loop.counters(); let run_loop_thread = thread::spawn(move || run_loop.start(None, 0)); wait_for_runloop(&blocks_processed); @@ -1846,13 +1787,8 @@ fn restarting_miner() { // Mine 2 nakamoto tenures for _i in 0..2 { - next_block_and_mine_commit( - &mut btc_regtest_controller, - 60, - &coord_channel, - &commits_submitted, - ) - .unwrap(); + next_block_and_mine_commit(&mut btc_regtest_controller, 60, &naka_conf, &rl1_counters) + .unwrap(); } let last_tip = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) @@ -1915,13 +1851,8 @@ fn restarting_miner() { // Mine 2 more nakamoto tenures for _i in 0..2 { - next_block_and_mine_commit( - &mut btc_regtest_controller, - 60, - &coord_channel_2, - &commits_submitted_2, - ) - .unwrap(); + next_block_and_mine_commit(&mut btc_regtest_controller, 60, &naka_conf, &rl2_counters) + .unwrap(); } // load the chain tip, and assert that it is a nakamoto block and at least 30 blocks have advanced in epoch 3 @@ -2020,6 +1951,7 @@ fn flash_blocks_on_epoch_3() { naka_proposed_blocks: proposals_submitted, .. } = run_loop.counters(); + let counters = run_loop.counters(); let coord_channel = run_loop.coordinator_channels(); @@ -2094,13 +2026,7 @@ fn flash_blocks_on_epoch_3() { // Mine 15 nakamoto tenures for _i in 0..15 { - next_block_and_mine_commit( - &mut btc_regtest_controller, - 60, - &coord_channel, - &commits_submitted, - ) - .unwrap(); + next_block_and_mine_commit(&mut btc_regtest_controller, 60, &naka_conf, &counters).unwrap(); } // Submit a TX @@ -2136,13 +2062,7 @@ fn flash_blocks_on_epoch_3() { // Mine 15 more nakamoto tenures for _i in 0..15 { - next_block_and_mine_commit( - &mut btc_regtest_controller, - 60, - &coord_channel, - &commits_submitted, - ) - .unwrap(); + next_block_and_mine_commit(&mut btc_regtest_controller, 60, &naka_conf, &counters).unwrap(); } // load the chain tip, and assert that it is a nakamoto block and at least 30 blocks have advanced in epoch 3 @@ -2719,6 +2639,7 @@ fn correct_burn_outs() { naka_proposed_blocks: proposals_submitted, .. } = run_loop.counters(); + let counters = run_loop.counters(); let coord_channel = run_loop.coordinator_channels(); @@ -2891,12 +2812,9 @@ fn correct_burn_outs() { let prior_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) .unwrap() .block_height; - if let Err(e) = next_block_and_mine_commit( - &mut btc_regtest_controller, - 30, - &coord_channel, - &commits_submitted, - ) { + if let Err(e) = + next_block_and_mine_commit(&mut btc_regtest_controller, 30, &naka_conf, &counters) + { warn!( "Error while minting a bitcoin block and waiting for stacks-node activity: {e:?}" ); @@ -3036,6 +2954,7 @@ fn block_proposal_api_endpoint() { naka_proposed_blocks: proposals_submitted, .. } = run_loop.counters(); + let counters = run_loop.counters(); let coord_channel = run_loop.coordinator_channels(); @@ -3076,13 +2995,7 @@ fn block_proposal_api_endpoint() { // Mine 3 nakamoto tenures for _ in 0..3 { - next_block_and_mine_commit( - &mut btc_regtest_controller, - 60, - &coord_channel, - &commits_submitted, - ) - .unwrap(); + next_block_and_mine_commit(&mut btc_regtest_controller, 60, &conf, &counters).unwrap(); } // TODO (hack) instantiate the sortdb in the burnchain @@ -3414,6 +3327,7 @@ fn miner_writes_proposed_block_to_stackerdb() { naka_proposed_blocks: proposals_submitted, .. 
} = run_loop.counters(); + let counters = run_loop.counters(); let coord_channel = run_loop.coordinator_channels(); @@ -3434,13 +3348,7 @@ fn miner_writes_proposed_block_to_stackerdb() { wait_for_first_naka_block_commit(60, &commits_submitted); // Mine 1 nakamoto tenure - next_block_and_mine_commit( - &mut btc_regtest_controller, - 60, - &coord_channel, - &commits_submitted, - ) - .unwrap(); + next_block_and_mine_commit(&mut btc_regtest_controller, 60, &naka_conf, &counters).unwrap(); let sortdb = naka_conf.get_burnchain().open_sortition_db(true).unwrap(); @@ -3525,6 +3433,7 @@ fn vote_for_aggregate_key_burn_op() { naka_proposed_blocks: proposals_submitted, .. } = run_loop.counters(); + let counters = run_loop.counters(); let coord_channel = run_loop.coordinator_channels(); @@ -3607,13 +3516,7 @@ fn vote_for_aggregate_key_burn_op() { ); for _i in 0..(blocks_until_prepare) { - next_block_and_mine_commit( - &mut btc_regtest_controller, - 60, - &coord_channel, - &commits_submitted, - ) - .unwrap(); + next_block_and_mine_commit(&mut btc_regtest_controller, 60, &naka_conf, &counters).unwrap(); } let reward_cycle = reward_cycle + 1; @@ -3663,13 +3566,7 @@ fn vote_for_aggregate_key_burn_op() { // the second block should process the vote, after which the vote should be set for _i in 0..2 { - next_block_and_mine_commit( - &mut btc_regtest_controller, - 60, - &coord_channel, - &commits_submitted, - ) - .unwrap(); + next_block_and_mine_commit(&mut btc_regtest_controller, 60, &naka_conf, &counters).unwrap(); } let mut vote_for_aggregate_key_found = false; @@ -4631,6 +4528,7 @@ fn burn_ops_integration_test() { naka_proposed_blocks: proposals_submitted, .. } = run_loop.counters(); + let counters = run_loop.counters(); let coord_channel = run_loop.coordinator_channels(); @@ -4682,13 +4580,7 @@ fn burn_ops_integration_test() { "Pre-stx operation should submit successfully" ); - next_block_and_mine_commit( - &mut btc_regtest_controller, - 60, - &coord_channel, - &commits_submitted, - ) - .unwrap(); + next_block_and_mine_commit(&mut btc_regtest_controller, 60, &naka_conf, &counters).unwrap(); let mut miner_signer_2 = Keychain::default(naka_conf.node.seed.clone()).generate_op_signer(); info!("Submitting second pre-stx op"); @@ -4816,13 +4708,7 @@ fn burn_ops_integration_test() { ); for _i in 0..(blocks_until_prepare) { - next_block_and_mine_commit( - &mut btc_regtest_controller, - 60, - &coord_channel, - &commits_submitted, - ) - .unwrap(); + next_block_and_mine_commit(&mut btc_regtest_controller, 60, &naka_conf, &counters).unwrap(); } let reward_cycle = reward_cycle + 1; @@ -4979,13 +4865,7 @@ fn burn_ops_integration_test() { // the second block should process the ops // Also mine 2 interim blocks to ensure the stack-stx ops are not processed in them for _i in 0..2 { - next_block_and_mine_commit( - &mut btc_regtest_controller, - 60, - &coord_channel, - &commits_submitted, - ) - .unwrap(); + next_block_and_mine_commit(&mut btc_regtest_controller, 60, &naka_conf, &counters).unwrap(); for interim_block_ix in 0..2 { info!("Mining interim block {interim_block_ix}"); let blocks_processed_before = coord_channel @@ -6032,6 +5912,7 @@ fn nakamoto_attempt_time() { naka_proposed_blocks: proposals_submitted, .. 
} = run_loop.counters(); + let counters = run_loop.counters(); let coord_channel = run_loop.coordinator_channels(); @@ -6071,13 +5952,7 @@ fn nakamoto_attempt_time() { // Mine 3 nakamoto tenures for _ in 0..3 { - next_block_and_mine_commit( - &mut btc_regtest_controller, - 60, - &coord_channel, - &commits_submitted, - ) - .unwrap(); + next_block_and_mine_commit(&mut btc_regtest_controller, 60, &naka_conf, &counters).unwrap(); } // TODO (hack) instantiate the sortdb in the burnchain @@ -6603,6 +6478,7 @@ fn signer_chainstate() { naka_proposed_blocks: proposals_submitted, .. } = run_loop.counters(); + let counters = run_loop.counters(); let coord_channel = run_loop.coordinator_channels(); @@ -6680,13 +6556,7 @@ fn signer_chainstate() { // hold the first and last blocks of the first tenure. we'll use this to submit reorging proposals let mut first_tenure_blocks: Option> = None; for i in 0..15 { - next_block_and_mine_commit( - &mut btc_regtest_controller, - 60, - &coord_channel, - &commits_submitted, - ) - .unwrap(); + next_block_and_mine_commit(&mut btc_regtest_controller, 60, &naka_conf, &counters).unwrap(); // this config disallows any reorg due to poorly timed block commits let proposal_conf = ProposalEvalConfig { @@ -7197,6 +7067,7 @@ fn continue_tenure_extend() { naka_skip_commit_op: test_skip_commit_op, .. } = run_loop.counters(); + let counters = run_loop.counters(); let coord_channel = run_loop.coordinator_channels(); @@ -7258,13 +7129,7 @@ fn continue_tenure_extend() { wait_for_first_naka_block_commit(60, &commits_submitted); // Mine a regular nakamoto tenure - next_block_and_mine_commit( - &mut btc_regtest_controller, - 60, - &coord_channel, - &commits_submitted, - ) - .unwrap(); + next_block_and_mine_commit(&mut btc_regtest_controller, 60, &naka_conf, &counters).unwrap(); wait_for(5, || { let blocks_processed = coord_channel @@ -8728,6 +8593,7 @@ fn check_block_info_rewards() { naka_proposed_blocks: proposals_submitted, .. } = run_loop.counters(); + let counters = run_loop.counters(); let coord_channel = run_loop.coordinator_channels(); @@ -8926,13 +8792,7 @@ fn check_block_info_rewards() { // (only 2 blocks maturation time in tests) info!("Mining 6 tenures to mature the block reward"); for i in 0..6 { - next_block_and_mine_commit( - &mut btc_regtest_controller, - 20, - &coord_channel, - &commits_submitted, - ) - .unwrap(); + next_block_and_mine_commit(&mut btc_regtest_controller, 20, &naka_conf, &counters).unwrap(); info!("Mined a block ({i})"); } @@ -9498,10 +9358,10 @@ fn v3_signer_api_endpoint() { let run_loop_stopper = run_loop.get_termination_switch(); let Counters { blocks_processed, - naka_submitted_commits: commits_submitted, naka_proposed_blocks: proposals_submitted, .. } = run_loop.counters(); + let counters = run_loop.counters(); let coord_channel = run_loop.coordinator_channels(); @@ -9562,13 +9422,7 @@ fn v3_signer_api_endpoint() { // Mine some nakamoto tenures for _i in 0..naka_tenures { - next_block_and_mine_commit( - &mut btc_regtest_controller, - 60, - &coord_channel, - &commits_submitted, - ) - .unwrap(); + next_block_and_mine_commit(&mut btc_regtest_controller, 60, &conf, &counters).unwrap(); } let block_height = btc_regtest_controller.get_headers_height(); let reward_cycle = btc_regtest_controller @@ -9674,7 +9528,7 @@ fn v3_blockbyheight_api_endpoint() { naka_proposed_blocks: proposals_submitted, .. 
} = run_loop.counters(); - + let counters = run_loop.counters(); let coord_channel = run_loop.coordinator_channels(); let run_loop_thread = thread::spawn(move || run_loop.start(None, 0)); @@ -9696,13 +9550,7 @@ fn v3_blockbyheight_api_endpoint() { wait_for_first_naka_block_commit(60, &commits_submitted); // Mine 1 nakamoto tenure - next_block_and_mine_commit( - &mut btc_regtest_controller, - 60, - &coord_channel, - &commits_submitted, - ) - .unwrap(); + next_block_and_mine_commit(&mut btc_regtest_controller, 60, &conf, &counters).unwrap(); let burnchain = conf.get_burnchain(); let sortdb = burnchain.open_sortition_db(true).unwrap(); @@ -9797,11 +9645,10 @@ fn nakamoto_lockup_events() { let run_loop_stopper = run_loop.get_termination_switch(); let Counters { blocks_processed, - naka_submitted_commits: commits_submitted, naka_proposed_blocks: proposals_submitted, .. } = run_loop.counters(); - + let counters = run_loop.counters(); let coord_channel = run_loop.coordinator_channels(); let run_loop_thread = thread::spawn(move || run_loop.start(None, 0)); @@ -9832,13 +9679,7 @@ fn nakamoto_lockup_events() { info!("------------------------- Setup finished, run test -------------------------"); - next_block_and_mine_commit( - &mut btc_regtest_controller, - 60, - &coord_channel, - &commits_submitted, - ) - .unwrap(); + next_block_and_mine_commit(&mut btc_regtest_controller, 60, &conf, &counters).unwrap(); let http_origin = format!("http://{}", &conf.node.rpc_bind); @@ -9982,6 +9823,7 @@ fn skip_mining_long_tx() { naka_mined_blocks: mined_naka_blocks, .. } = run_loop.counters(); + let counters = run_loop.counters(); let coord_channel = run_loop.coordinator_channels(); @@ -10020,13 +9862,7 @@ fn skip_mining_long_tx() { // Mine a few nakamoto tenures with some interim blocks in them for i in 0..5 { let mined_before = mined_naka_blocks.load(Ordering::SeqCst); - next_block_and_mine_commit( - &mut btc_regtest_controller, - 60, - &coord_channel, - &commits_submitted, - ) - .unwrap(); + next_block_and_mine_commit(&mut btc_regtest_controller, 60, &naka_conf, &counters).unwrap(); if i == 0 { // we trigger the nakamoto miner to evaluate the long running transaction, @@ -10134,17 +9970,10 @@ fn test_shadow_recovery() { let naka_conf = signer_test.running_nodes.conf.clone(); let btc_regtest_controller = &mut signer_test.running_nodes.btc_regtest_controller; - let coord_channel = signer_test.running_nodes.coord_channel.clone(); - let commits_submitted = signer_test.running_nodes.commits_submitted.clone(); + let counters = signer_test.running_nodes.counters.clone(); // make another tenure - next_block_and_mine_commit( - btc_regtest_controller, - 60, - &coord_channel, - &commits_submitted, - ) - .unwrap(); + next_block_and_mine_commit(btc_regtest_controller, 60, &naka_conf, &counters).unwrap(); let block_height = btc_regtest_controller.get_headers_height(); let reward_cycle = btc_regtest_controller @@ -10219,13 +10048,7 @@ fn test_shadow_recovery() { } // make another tenure - next_block_and_mine_commit( - btc_regtest_controller, - 60, - &coord_channel, - &commits_submitted, - ) - .unwrap(); + next_block_and_mine_commit(btc_regtest_controller, 60, &naka_conf, &counters).unwrap(); // all shadow blocks are present and processed let mut shadow_ids = HashSet::new(); @@ -10950,6 +10773,7 @@ fn test_tenure_extend_from_flashblocks() { let coord_channel = signer_test.running_nodes.coord_channel.clone(); let commits_submitted = signer_test.running_nodes.commits_submitted.clone(); let sortitions_processed = 
signer_test.running_nodes.sortitions_processed.clone(); + let counters = signer_test.running_nodes.counters.clone(); let nakamoto_test_skip_commit_op = signer_test .running_nodes .nakamoto_test_skip_commit_op @@ -10969,13 +10793,7 @@ fn test_tenure_extend_from_flashblocks() { .unwrap(); for _ in 0..3 { - next_block_and_mine_commit( - btc_regtest_controller, - 60, - &coord_channel, - &commits_submitted, - ) - .unwrap(); + next_block_and_mine_commit(btc_regtest_controller, 60, &naka_conf, &counters).unwrap(); } let burn_view_contract = r#" @@ -11021,24 +10839,12 @@ fn test_tenure_extend_from_flashblocks() { }) .expect("Timed out waiting for interim blocks to be mined"); - next_block_and_mine_commit( - btc_regtest_controller, - 60, - &coord_channel, - &commits_submitted, - ) - .unwrap(); + next_block_and_mine_commit(btc_regtest_controller, 60, &naka_conf, &counters).unwrap(); // stall miner and relayer // make tenure but don't wait for a stacks block - next_block_and_commits_only( - btc_regtest_controller, - 60, - &coord_channel, - &commits_submitted, - ) - .unwrap(); + next_block_and_commits_only(btc_regtest_controller, 60, &naka_conf, &counters).unwrap(); // prevent the mienr from sending another block-commit nakamoto_test_skip_commit_op.set(true); diff --git a/testnet/stacks-node/src/tests/signer/mod.rs b/testnet/stacks-node/src/tests/signer/mod.rs index 1bf57444ed..6b355fe5aa 100644 --- a/testnet/stacks-node/src/tests/signer/mod.rs +++ b/testnet/stacks-node/src/tests/signer/mod.rs @@ -336,15 +336,14 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest + Send + 'static, T: SignerEventTrait + 'static> SignerTest>], - commits_submitted: &[&Arc], + node_confs: &[&NeonConfig], + node_counters: &[&Counters], timeout: Duration, ) { let blocks_len = test_observer::get_blocks().len(); @@ -370,8 +369,8 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest rl1_commits_before) + let info = get_chain_info(&conf); + Ok(info.burn_block_height >= 2 + info_before.burn_block_height + && rl1_commits.load(Ordering::SeqCst) > rl1_commits_before) }) .unwrap(); From cade53755e97e2b7f4198c778e3e63dfb7c1f120 Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Fri, 31 Jan 2025 10:37:40 -0500 Subject: [PATCH 40/59] fix: Disable flaky test `flash_blocks_on_epoch_3` --- .github/workflows/bitcoin-tests.yml | 3 ++- testnet/stacks-node/src/tests/nakamoto_integrations.rs | 8 +++++++- 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index 13d94438f9..57b37f44e7 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -83,7 +83,8 @@ jobs: - tests::neon_integrations::start_stop_bitcoind - tests::should_succeed_handling_malformed_and_valid_txs - tests::nakamoto_integrations::simple_neon_integration - - tests::nakamoto_integrations::flash_blocks_on_epoch_3 + # Disable this flaky test. 
We don't need continue testing Epoch 2 -> 3 transition + - tests::nakamoto_integrations::flash_blocks_on_epoch_3_FLAKY - tests::nakamoto_integrations::mine_multiple_per_tenure_integration - tests::nakamoto_integrations::block_proposal_api_endpoint - tests::nakamoto_integrations::miner_writes_proposed_block_to_stackerdb diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 71775742d0..c9e8f6e161 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -1968,6 +1968,7 @@ fn restarting_miner() { #[test] #[ignore] +#[allow(non_snake_case)] /// This test spins up a nakamoto-neon node. /// It starts in Epoch 2.0, mines with `neon_node` to Epoch 3.0, /// having flash blocks when epoch updates and expects everything to work normally, @@ -1977,7 +1978,12 @@ fn restarting_miner() { /// * 30 blocks are mined after 3.0 starts. This is enough to mine across 2 reward cycles /// * A transaction submitted to the mempool in 3.0 will be mined in 3.0 /// * The final chain tip is a nakamoto block -fn flash_blocks_on_epoch_3() { +/// +/// NOTE: This test has been disabled because it's flaky, and we don't need to +/// test the Epoch 3 transition since it's already happened +/// +/// See issue [#5765](https://github.com/stacks-network/stacks-core/issues/5765) for details +fn flash_blocks_on_epoch_3_FLAKY() { if env::var("BITCOIND_TEST") != Ok("1".into()) { return; } From eb883ee9eef299631e8adda64d4c5a05de804c5a Mon Sep 17 00:00:00 2001 From: Roberto De Ioris Date: Fri, 31 Jan 2025 15:42:24 +0000 Subject: [PATCH 41/59] improved error messages on timeout --- testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs index ec827033ba..4adba66be1 100644 --- a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs @@ -380,7 +380,7 @@ impl SignerCoordinator { "rejections_threshold" => self.total_weight.saturating_sub(self.weight_threshold) ); return Err(NakamotoNodeError::SigningCoordinatorFailure( - "Gave up while trying reaching the threshold".into(), + "Timed out while waiting for signatures".into(), )); } @@ -437,7 +437,7 @@ impl SignerCoordinator { "rejections_threshold" => self.total_weight.saturating_sub(self.weight_threshold) ); return Err(NakamotoNodeError::SigningCoordinatorFailure( - "Gave up while trying reaching the threshold".into(), + "Timed out while waiting for signatures".into(), )); } else { continue; From bbc6ab82cc6aa76954d38cb6b0907eaa91204201 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Fri, 31 Jan 2025 10:52:45 -0600 Subject: [PATCH 42/59] fix revealed flake in restarting_miner and test_shadow_recovery --- .../stacks-node/src/tests/nakamoto_integrations.rs | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 0c60902fa0..3a75e6b630 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -1745,7 +1745,7 @@ fn restarting_miner() { naka_proposed_blocks: proposals_submitted_2, .. 
} = run_loop_2.counters(); - let rl2_counters = run_loop.counters(); + let rl2_counters = run_loop_2.counters(); let run_loop_thread = thread::spawn(move || run_loop.start(None, 0)); wait_for_runloop(&blocks_processed); @@ -1832,13 +1832,16 @@ fn restarting_miner() { let tip = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) .unwrap() .unwrap(); - Ok(tip.stacks_block_height > last_tip.stacks_block_height) + let stacks_tip_committed_to = rl2_counters + .naka_submitted_commit_last_stacks_tip + .load(Ordering::SeqCst); + Ok(tip.stacks_block_height > last_tip.stacks_block_height + && stacks_tip_committed_to > last_tip.stacks_block_height) }) .unwrap_or_else(|e| { let tip = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) .unwrap() .unwrap(); - error!( "Failed to get a new block after restart"; "last_tip_height" => last_tip.stacks_block_height, @@ -10043,8 +10046,7 @@ fn test_shadow_recovery() { // revive ATC-C by waiting for commits for _i in 0..4 { - btc_regtest_controller.bootstrap_chain(1); - sleep_ms(30_000); + next_block_and_commits_only(btc_regtest_controller, 60, &naka_conf, &counters).unwrap(); } // make another tenure From 8803ff15582c63a360456ee5db0f61c5c71d01fc Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Wed, 29 Jan 2025 16:30:21 -0500 Subject: [PATCH 43/59] feat: do not issue a time-based tenure extend earlier than needed With this change, a miner will not issue a time-based tenure extend if it has used less than X% of the tenure budget, where X can be specified in the `tenure_extend_cost_threshold` config option. --- CHANGELOG.md | 5 ++ stackslib/src/chainstate/nakamoto/miner.rs | 36 +++++++---- stackslib/src/cli.rs | 18 +++++- stackslib/src/config/mod.rs | 8 ++- .../stacks-node/src/nakamoto_node/miner.rs | 63 ++++++++++++++----- 5 files changed, 99 insertions(+), 31 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 226f7b5159..7ec05bb51e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,9 +7,14 @@ and this project adheres to the versioning scheme outlined in the [README.md](RE ## [Unreleased] +### Added + +- Miner config option `tenure_extend_cost_threshold` to specify the percentage of the tenure budget that must be spent before a time-based tenure extend is attempted + ### Changed - Miner will include other transactions in blocks with tenure extend transactions (#5760) +- Miner will not issue a tenure extend until at least half of the block budget has been spent (#5757) ## [3.1.0.0.4] diff --git a/stackslib/src/chainstate/nakamoto/miner.rs b/stackslib/src/chainstate/nakamoto/miner.rs index a36e52512d..0156adbc96 100644 --- a/stackslib/src/chainstate/nakamoto/miner.rs +++ b/stackslib/src/chainstate/nakamoto/miner.rs @@ -150,6 +150,14 @@ pub struct MinerTenureInfo<'a> { pub tenure_block_commit_opt: Option, } +pub struct BlockMetadata { + pub block: NakamotoBlock, + pub tenure_consumed: ExecutionCost, + pub tenure_budget: ExecutionCost, + pub tenure_size: u64, + pub tx_events: Vec, +} + impl NakamotoBlockBuilder { /// Make a block builder from genesis (testing only) pub fn new_first_block( @@ -526,7 +534,7 @@ impl NakamotoBlockBuilder { settings: BlockBuilderSettings, event_observer: Option<&dyn MemPoolEventDispatcher>, signer_bitvec_len: u16, - ) -> Result<(NakamotoBlock, ExecutionCost, u64, Vec), Error> { + ) -> Result { let (tip_consensus_hash, tip_block_hash, tip_height) = ( parent_stacks_header.consensus_hash.clone(), parent_stacks_header.anchored_header.block_hash(), @@ -556,7 +564,7 @@ impl NakamotoBlockBuilder 
{ builder.load_tenure_info(&mut chainstate, burn_dbconn, tenure_info.cause())?; let mut tenure_tx = builder.tenure_begin(burn_dbconn, &mut miner_tenure_info)?; - let block_limit = tenure_tx + let tenure_budget = tenure_tx .block_limit() .expect("Failed to obtain block limit from miner's block connection"); @@ -570,7 +578,7 @@ impl NakamotoBlockBuilder { (1..=100).contains(&percentage), "BUG: tenure_cost_limit_per_block_percentage: {percentage}%. Must be between between 1 and 100" ); - let mut remaining_limit = block_limit.clone(); + let mut remaining_limit = tenure_budget.clone(); let cost_so_far = tenure_tx.cost_so_far(); if remaining_limit.sub(&cost_so_far).is_ok() && remaining_limit.divide(100).is_ok() { remaining_limit.multiply(percentage.into()).expect( @@ -581,7 +589,7 @@ impl NakamotoBlockBuilder { "Setting soft limit for clarity cost to {percentage}% of remaining block limit"; "remaining_limit" => %remaining_limit, "cost_so_far" => %cost_so_far, - "block_limit" => %block_limit, + "block_limit" => %tenure_budget, ); soft_limit = Some(remaining_limit); }; @@ -630,13 +638,13 @@ impl NakamotoBlockBuilder { // save the block so we can build microblocks off of it let block = builder.mine_nakamoto_block(&mut tenure_tx); - let size = builder.bytes_so_far; - let consumed = builder.tenure_finish(tenure_tx)?; + let tenure_size = builder.bytes_so_far; + let tenure_consumed = builder.tenure_finish(tenure_tx)?; let ts_end = get_epoch_time_ms(); set_last_mined_block_transaction_count(block.txs.len() as u64); - set_last_mined_execution_cost_observed(&consumed, &block_limit); + set_last_mined_execution_cost_observed(&tenure_consumed, &tenure_budget); info!( "Miner: mined Nakamoto block"; @@ -645,14 +653,20 @@ impl NakamotoBlockBuilder { "height" => block.header.chain_length, "tx_count" => block.txs.len(), "parent_block_id" => %block.header.parent_block_id, - "block_size" => size, - "execution_consumed" => %consumed, - "percent_full" => block_limit.proportion_largest_dimension(&consumed), + "block_size" => tenure_size, + "execution_consumed" => %tenure_consumed, + "percent_full" => tenure_budget.proportion_largest_dimension(&tenure_consumed), "assembly_time_ms" => ts_end.saturating_sub(ts_start), "consensus_hash" => %block.header.consensus_hash ); - Ok((block, consumed, size, tx_events)) + Ok(BlockMetadata { + block, + tenure_consumed, + tenure_budget, + tenure_size, + tx_events, + }) } pub fn get_bytes_so_far(&self) -> u64 { diff --git a/stackslib/src/cli.rs b/stackslib/src/cli.rs index 286e7f1854..f43812f2ba 100644 --- a/stackslib/src/cli.rs +++ b/stackslib/src/cli.rs @@ -40,7 +40,7 @@ use crate::chainstate::burn::db::sortdb::{ }; use crate::chainstate::burn::{BlockSnapshot, ConsensusHash}; use crate::chainstate::coordinator::OnChainRewardSetProvider; -use crate::chainstate::nakamoto::miner::{NakamotoBlockBuilder, NakamotoTenureInfo}; +use crate::chainstate::nakamoto::miner::{BlockMetadata, NakamotoBlockBuilder, NakamotoTenureInfo}; use crate::chainstate::nakamoto::{NakamotoBlock, NakamotoChainState}; use crate::chainstate::stacks::db::blocks::StagingBlock; use crate::chainstate::stacks::db::{StacksBlockHeaderTypes, StacksChainState, StacksHeaderInfo}; @@ -504,7 +504,21 @@ pub fn command_try_mine(argv: &[String], conf: Option<&Config>) { None, 0, ) - .map(|(block, cost, size, _)| (block.header.block_hash(), block.txs, cost, size)) + .map( + |BlockMetadata { + block, + tenure_consumed, + tenure_size, + .. 
+ }| { + ( + block.header.block_hash(), + block.txs, + tenure_consumed, + tenure_size, + ) + }, + ) } }; diff --git a/stackslib/src/config/mod.rs b/stackslib/src/config/mod.rs index b9b9bf5204..3684b16a32 100644 --- a/stackslib/src/config/mod.rs +++ b/stackslib/src/config/mod.rs @@ -97,7 +97,8 @@ const DEFAULT_TENURE_COST_LIMIT_PER_BLOCK_PERCENTAGE: u8 = 25; const DEFAULT_TENURE_EXTEND_POLL_SECS: u64 = 1; // This should be greater than the signers' timeout. This is used for issuing fallback tenure extends -const DEFAULT_TENURE_TIMEOUT_SECS: u64 = 420; +const DEFAULT_TENURE_TIMEOUT_SECS: u64 = 180; +const DEFAULT_TENURE_EXTEND_COST_THRESHOLD: u64 = 50; static HELIUM_DEFAULT_CONNECTION_OPTIONS: LazyLock = LazyLock::new(|| ConnectionOptions { @@ -2155,6 +2156,8 @@ pub struct MinerConfig { pub tenure_extend_poll_secs: Duration, /// Duration to wait before attempting to issue a tenure extend pub tenure_timeout: Duration, + /// Percentage of block budget that must be used before attempting a time-based tenure extend + pub tenure_extend_cost_threshold: u64, } impl Default for MinerConfig { @@ -2193,6 +2196,7 @@ impl Default for MinerConfig { ), tenure_extend_poll_secs: Duration::from_secs(DEFAULT_TENURE_EXTEND_POLL_SECS), tenure_timeout: Duration::from_secs(DEFAULT_TENURE_TIMEOUT_SECS), + tenure_extend_cost_threshold: DEFAULT_TENURE_EXTEND_COST_THRESHOLD, } } } @@ -2589,6 +2593,7 @@ pub struct MinerConfigFile { pub tenure_cost_limit_per_block_percentage: Option, pub tenure_extend_poll_secs: Option, pub tenure_timeout_secs: Option, + pub tenure_extend_cost_threshold: Option, } impl MinerConfigFile { @@ -2731,6 +2736,7 @@ impl MinerConfigFile { tenure_cost_limit_per_block_percentage, tenure_extend_poll_secs: self.tenure_extend_poll_secs.map(Duration::from_secs).unwrap_or(miner_default_config.tenure_extend_poll_secs), tenure_timeout: self.tenure_timeout_secs.map(Duration::from_secs).unwrap_or(miner_default_config.tenure_timeout), + tenure_extend_cost_threshold: self.tenure_extend_cost_threshold.unwrap_or(miner_default_config.tenure_extend_cost_threshold), }) } } diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index fb233737bb..16b33ead7a 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -21,6 +21,7 @@ use std::thread; use std::time::{Duration, Instant}; use clarity::boot_util::boot_code_id; +use clarity::vm::costs::ExecutionCost; use clarity::vm::types::PrincipalData; use libsigner::v0::messages::{MinerSlotID, SignerMessage}; use libsigner::StackerDBSession; @@ -177,6 +178,10 @@ pub struct BlockMinerThread { last_block_mined: Option, /// Number of blocks mined since a tenure change/extend was attempted mined_blocks: u64, + /// Cost consumed by the current tenure + tenure_cost: ExecutionCost, + /// Cost budget for the current tenure + tenure_budget: ExecutionCost, /// Copy of the node's registered VRF key registered_key: RegisteredKey, /// Burnchain block snapshot which elected this miner @@ -237,6 +242,8 @@ impl BlockMinerThread { burn_tip_at_start: burn_tip_at_start.clone(), tenure_change_time: Instant::now(), abort_flag: Arc::new(AtomicBool::new(false)), + tenure_cost: ExecutionCost::ZERO, + tenure_budget: ExecutionCost::ZERO, } } @@ -1183,7 +1190,7 @@ impl BlockMinerThread { } // build the block itself - let (mut block, consumed, size, tx_events) = NakamotoBlockBuilder::build_nakamoto_block( + let mut block_metadata = NakamotoBlockBuilder::build_nakamoto_block( 
&chain_state, &burn_db .index_handle_at_ch(&self.burn_block.consensus_hash) @@ -1210,39 +1217,48 @@ impl BlockMinerThread { e })?; - if block.txs.is_empty() { + if block_metadata.block.txs.is_empty() { return Err(ChainstateError::NoTransactionsToMine.into()); } let mining_key = self.keychain.get_nakamoto_sk(); let miner_signature = mining_key - .sign(block.header.miner_signature_hash().as_bytes()) + .sign( + block_metadata + .block + .header + .miner_signature_hash() + .as_bytes(), + ) .map_err(NakamotoNodeError::MinerSignatureError)?; - block.header.miner_signature = miner_signature; + block_metadata.block.header.miner_signature = miner_signature; info!( "Miner: Assembled block #{} for signer set proposal: {}, with {} txs", - block.header.chain_length, - block.header.block_hash(), - block.txs.len(); - "signer_sighash" => %block.header.signer_signature_hash(), - "consensus_hash" => %block.header.consensus_hash, - "parent_block_id" => %block.header.parent_block_id, - "timestamp" => block.header.timestamp, + block_metadata.block.header.chain_length, + block_metadata.block.header.block_hash(), + block_metadata.block.txs.len(); + "signer_sighash" => %block_metadata.block.header.signer_signature_hash(), + "consensus_hash" => %block_metadata.block.header.consensus_hash, + "parent_block_id" => %block_metadata.block.header.parent_block_id, + "timestamp" => block_metadata.block.header.timestamp, ); self.event_dispatcher.process_mined_nakamoto_block_event( self.burn_block.block_height, - &block, - size, - &consumed, - tx_events, + &block_metadata.block, + block_metadata.tenure_size, + &block_metadata.tenure_consumed, + block_metadata.tx_events, ); + self.tenure_cost = block_metadata.tenure_consumed; + self.tenure_budget = block_metadata.tenure_budget; + // last chance -- confirm that the stacks tip is unchanged (since it could have taken long // enough to build this block that another block could have arrived), and confirm that all // Stacks blocks with heights higher than the canonical tip are processed. self.check_burn_tip_changed(&burn_db)?; - Ok(block) + Ok(block_metadata.block) } #[cfg_attr(test, mutants::skip)] @@ -1273,8 +1289,20 @@ impl BlockMinerThread { } } }; + // Check if we can and should include a time-based tenure extend. if self.last_block_mined.is_some() { - // Check if we can extend the current tenure + // Do not extend if we have spent < 50% of the budget, since it is + // not necessary. + let usage = self + .tenure_budget + .proportion_largest_dimension(&self.tenure_cost); + if usage < self.config.miner.tenure_extend_cost_threshold { + return Ok(NakamotoTenureInfo { + coinbase_tx: None, + tenure_change_tx: None, + }); + } + let tenure_extend_timestamp = coordinator.get_tenure_extend_timestamp(); if get_epoch_time_secs() <= tenure_extend_timestamp && self.tenure_change_time.elapsed() <= self.config.miner.tenure_timeout @@ -1284,6 +1312,7 @@ impl BlockMinerThread { tenure_change_tx: None, }); } + info!("Miner: Time-based tenure extend"; "current_timestamp" => get_epoch_time_secs(), "tenure_extend_timestamp" => tenure_extend_timestamp, From 21af65581308a89ab5cdee3f69a89d7b6fdcd630 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Fri, 31 Jan 2025 12:31:34 -0500 Subject: [PATCH 44/59] test: add `tenure_extend_cost_threshold` integration test Also update the configs in other tests so that they do not need to wait for the default tenure extend threshold. 
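Reviewer note: the gate added in PATCH 43 and exercised by this test can be summarized in a small, self-contained sketch. This is an illustration only, under simplifying assumptions: the percentages below stand in for ExecutionCost values, `should_attempt_time_based_extend` and its parameters are made-up names, and the real check compares ExecutionCost values via proportion_largest_dimension against miner.tenure_extend_cost_threshold inside the miner thread.

    // Illustrative sketch, not the node's API: a time-based tenure extend is only
    // considered once the consumed share of the tenure budget reaches the threshold.
    fn should_attempt_time_based_extend(
        proportion_consumed: u64,          // percent of the tenure budget used so far (0-100)
        tenure_extend_cost_threshold: u64, // miner.tenure_extend_cost_threshold (percent)
        idle_timeout_passed: bool,         // signers' tenure-idle timestamp has been reached
        tenure_timeout_passed: bool,       // miner.tenure_timeout fallback has elapsed
    ) -> bool {
        if proportion_consumed < tenure_extend_cost_threshold {
            // Budget mostly unused: extending buys nothing yet, so skip it.
            return false;
        }
        // Otherwise extend as soon as either the signer-driven idle timestamp or the
        // miner-side fallback timeout allows it.
        idle_timeout_passed || tenure_timeout_passed
    }

    fn main() {
        // With the default threshold of 50%, a tenure that has used only 10% of its
        // budget is not extended even after the idle timeout fires.
        assert!(!should_attempt_time_based_extend(10, 50, true, false));
        // Once 60% of the budget is consumed, the extend can go through.
        assert!(should_attempt_time_based_extend(60, 50, true, false));
        // Setting the threshold to 0 (as the updated tests do) restores the old behavior.
        assert!(should_attempt_time_based_extend(0, 0, true, false));
    }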
--- testnet/stacks-node/src/tests/signer/v0.rs | 119 ++++++++++++++++++++- 1 file changed, 116 insertions(+), 3 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 2bbcccaced..daf3d44186 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -2630,7 +2630,9 @@ fn tenure_extend_after_idle_signers() { |config| { config.tenure_idle_timeout = idle_timeout; }, - |_| {}, + |config| { + config.miner.tenure_extend_cost_threshold = 0; + }, None, None, ); @@ -2680,7 +2682,9 @@ fn tenure_extend_with_other_transactions() { |config| { config.tenure_idle_timeout = idle_timeout; }, - |_| {}, + |config| { + config.miner.tenure_extend_cost_threshold = 0; + }, None, None, ); @@ -2787,6 +2791,7 @@ fn tenure_extend_after_idle_miner() { }, |config| { config.miner.tenure_timeout = miner_idle_timeout; + config.miner.tenure_extend_cost_threshold = 0; }, None, None, @@ -2863,6 +2868,7 @@ fn tenure_extend_succeeds_after_rejected_attempt() { }, |config| { config.miner.tenure_timeout = miner_idle_timeout; + config.miner.tenure_extend_cost_threshold = 0; }, None, None, @@ -2951,7 +2957,9 @@ fn stx_transfers_dont_effect_idle_timeout() { |config| { config.tenure_idle_timeout = idle_timeout; }, - |_| {}, + |config| { + config.miner.tenure_extend_cost_threshold = 0; + }, None, None, ); @@ -3085,6 +3093,7 @@ fn idle_tenure_extend_active_mining() { |config| { // accept all proposals in the node config.connection_options.block_proposal_max_age_secs = u64::MAX; + config.miner.tenure_extend_cost_threshold = 0; }, None, None, @@ -12592,3 +12601,107 @@ fn allow_reorg_within_first_proposal_burn_block_timing_secs() { run_loop_2_thread.join().unwrap(); signer_test.shutdown(); } + +#[test] +#[ignore] +/// This test verifies that a miner will produce a TenureExtend transaction +/// only after it has reached the cost threshold. +fn tenure_extend_cost_threshold() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let deployer_sk = Secp256k1PrivateKey::random(); + let deployer_addr = tests::to_addr(&deployer_sk); + let num_txs = 10; + let tx_fee = 10000; + let deploy_fee = 190200; + + info!("------------------------- Test Setup -------------------------"); + let num_signers = 5; + let idle_timeout = Duration::from_secs(10); + let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( + num_signers, + vec![(deployer_addr, deploy_fee + tx_fee * num_txs)], + |config| { + config.tenure_idle_timeout = idle_timeout; + }, + |config| { + config.miner.tenure_extend_cost_threshold = 5; + }, + None, + None, + ); + let naka_conf = signer_test.running_nodes.conf.clone(); + let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); + + signer_test.boot_to_epoch_3(); + + info!("---- Nakamoto booted, starting test ----"); + signer_test.mine_nakamoto_block(Duration::from_secs(30), true); + + info!("---- Waiting for a tenure extend ----"); + + // Now, wait for a block with a tenure extend + wait_for(idle_timeout.as_secs() + 10, || { + Ok(last_block_contains_tenure_change_tx( + TenureChangeCause::Extended, + )) + }) + .expect_err("Received a tenure extend before cost threshold was reached"); + + // Now deploy a contract and call it in order to cross the threshold. 
+ let contract_src = format!( + r#" +(define-data-var my-var uint u0) +(define-public (f) (begin {} (ok 1))) (begin (f)) + "#, + ["(var-get my-var)"; 250].join(" ") + ); + + // First, lets deploy the contract + let mut nonce = 0; + let contract_tx = make_contract_publish( + &deployer_sk, + nonce, + deploy_fee, + naka_conf.burnchain.chain_id, + "small-contract", + &contract_src, + ); + submit_tx(&http_origin, &contract_tx); + nonce += 1; + + // Wait for the contract to be included in a block + wait_for(60, || { + let account = get_account(&http_origin, &deployer_addr); + Ok(account.nonce == nonce) + }) + .expect("Contract not included in block"); + + // Now, lets call the contract a bunch of times to increase the tenure cost + for _ in 0..num_txs { + let call_tx = make_contract_call( + &deployer_sk, + nonce, + tx_fee, + naka_conf.burnchain.chain_id, + &deployer_addr, + "small-contract", + "f", + &[], + ); + submit_tx(&http_origin, &call_tx); + nonce += 1; + } + + // Now, wait for a block with a tenure extend + wait_for(idle_timeout.as_secs() + 10, || { + Ok(last_block_contains_tenure_change_tx( + TenureChangeCause::Extended, + )) + }) + .expect("Timed out waiting for a block with a tenure extend"); + + signer_test.shutdown(); +} From fe429f472fe3e99b6cf1909fc5574262109fd36c Mon Sep 17 00:00:00 2001 From: Roberto De Ioris Date: Fri, 31 Jan 2025 17:56:55 +0000 Subject: [PATCH 45/59] report rejection step in miner log, ensure signe_test shutdown in integration test --- testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs | 5 +++-- testnet/stacks-node/src/tests/signer/v0.rs | 3 +++ 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs index 4adba66be1..7a778580d7 100644 --- a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs @@ -390,7 +390,7 @@ impl SignerCoordinator { if rejections != block_status.total_reject_weight as u64 { rejections = block_status.total_reject_weight as u64; - let rejections_timeout_tuple = self + let (rejections_step, new_rejections_timeout) = self .block_rejection_timeout_steps .range((Included(0), Included(rejections))) .last() @@ -399,10 +399,11 @@ impl SignerCoordinator { "Invalid rejection timeout step function definition".into(), ) })?; - rejections_timeout = rejections_timeout_tuple.1; + rejections_timeout = new_rejections_timeout; info!("Number of received rejections updated, resetting timeout"; "rejections" => rejections, "rejections_timeout" => rejections_timeout.as_secs(), + "rejections_step" => rejections_step, "rejections_threshold" => self.total_weight.saturating_sub(self.weight_threshold)); Counters::set( diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 9fa4fac3c0..e055ad82f2 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -8111,6 +8111,9 @@ fn block_validation_check_rejection_timeout_heuristic() { // reset reject/ignore TEST_REJECT_ALL_BLOCK_PROPOSAL.set(vec![]); TEST_IGNORE_ALL_BLOCK_PROPOSALS.set(vec![]); + + info!("------------------------- Shutdown -------------------------"); + signer_test.shutdown(); } /// Test scenario: From 60634c20c0392e30f812d5e942fe4a5721435e5d Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Fri, 31 Jan 2025 12:03:43 -0600 Subject: [PATCH 46/59] fix flake in check_block_times --- 
.../src/tests/nakamoto_integrations.rs | 49 ++++++++----------- 1 file changed, 20 insertions(+), 29 deletions(-) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 3a75e6b630..f426995595 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -780,19 +780,18 @@ pub fn next_block_and_wait_for_commits( .map(|info| info.stacks_tip_height) .max() .unwrap(); - let last_commit_burn_hts: Vec<_> = node_counters + let last_commit_burn_hts = node_counters .iter() - .map(|c| &c.naka_submitted_commit_last_burn_height) - .collect(); - let last_commit_stacks_hts: Vec<_> = node_counters + .map(|c| &c.naka_submitted_commit_last_burn_height); + let last_commit_stacks_hts = node_counters .iter() - .map(|c| &c.naka_submitted_commit_last_stacks_tip) - .collect(); + .map(|c| &c.naka_submitted_commit_last_stacks_tip); next_block_and(btc_controller, timeout_secs, || { - let burn_height_committed_to = last_commit_burn_hts.iter().all(|last_commit_burn_height| { - last_commit_burn_height.load(Ordering::SeqCst) > burn_ht_before - }); + let burn_height_committed_to = + last_commit_burn_hts.clone().all(|last_commit_burn_height| { + last_commit_burn_height.load(Ordering::SeqCst) > burn_ht_before + }); if !wait_for_stacks_block { Ok(burn_height_committed_to) } else { @@ -801,7 +800,7 @@ pub fn next_block_and_wait_for_commits( } let stacks_tip_committed_to = last_commit_stacks_hts - .iter() + .clone() .all(|last_commit_stacks_height| { last_commit_stacks_height.load(Ordering::SeqCst) > stacks_ht_before }); @@ -7564,6 +7563,7 @@ fn check_block_times() { naka_proposed_blocks: proposals_submitted, .. } = run_loop.counters(); + let counters = run_loop.counters(); let coord_channel = run_loop.coordinator_channels(); @@ -7606,19 +7606,13 @@ fn check_block_times() { info!("Nakamoto miner started..."); blind_signer(&naka_conf, &signers, proposals_submitted); + wait_for_first_naka_block_commit(60, &counters.naka_submitted_commits); - let epochs = naka_conf.burnchain.epochs.clone().unwrap(); - let epoch_3 = &epochs[StacksEpochId::Epoch30]; - let epoch_3_start = epoch_3.start_height; - let mut last_stacks_block_height = 0; - let mut last_tenure_height = 0; - next_block_and(&mut btc_regtest_controller, 60, || { - let info = get_chain_info_result(&naka_conf).unwrap(); - last_stacks_block_height = info.stacks_tip_height as u128; - last_tenure_height = last_stacks_block_height + 1; - Ok(info.burn_block_height == epoch_3_start) - }) - .unwrap(); + let info = get_chain_info_result(&naka_conf).unwrap(); + let mut last_stacks_block_height = info.stacks_tip_height as u128; + let mut last_tenure_height = last_stacks_block_height + 1; + + next_block_and_mine_commit(&mut btc_regtest_controller, 60, &naka_conf, &counters).unwrap(); let time0_value = call_read_only( &naka_conf, @@ -7676,16 +7670,13 @@ fn check_block_times() { Ok(stacks_block_height > last_stacks_block_height && cur_sender_nonce == sender_nonce) }) .expect("Timed out waiting for contracts to publish"); - last_stacks_block_height = stacks_block_height; // Repeat these tests for 5 tenures for _ in 0..5 { - next_block_and(&mut btc_regtest_controller, 60, || { - let info = get_chain_info_result(&naka_conf).unwrap(); - stacks_block_height = info.stacks_tip_height as u128; - Ok(stacks_block_height > last_stacks_block_height) - }) - .unwrap(); + next_block_and_mine_commit(&mut btc_regtest_controller, 60, &naka_conf, 
&counters).unwrap(); + let info = get_chain_info_result(&naka_conf).unwrap(); + stacks_block_height = info.stacks_tip_height as u128; + last_stacks_block_height = stacks_block_height; last_tenure_height += 1; info!("New tenure {last_tenure_height}, Stacks height: {last_stacks_block_height}"); From 6a238a76228c16c3a15e91a6d1352175299b5862 Mon Sep 17 00:00:00 2001 From: Roberto De Ioris Date: Fri, 31 Jan 2025 18:09:48 +0000 Subject: [PATCH 47/59] added set_miner_current_rejections_timeout and set_miner_current_rejections to Counters struct --- .../src/nakamoto_node/signer_coordinator.rs | 7 ++----- testnet/stacks-node/src/run_loop/neon.rs | 12 ++++++++++-- 2 files changed, 12 insertions(+), 7 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs index 7a778580d7..97b22ef68e 100644 --- a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs @@ -406,11 +406,8 @@ impl SignerCoordinator { "rejections_step" => rejections_step, "rejections_threshold" => self.total_weight.saturating_sub(self.weight_threshold)); - Counters::set( - &counters.naka_miner_current_rejections_timeout_secs, - rejections_timeout.as_secs(), - ); - Counters::set(&counters.naka_miner_current_rejections, rejections); + counters.set_miner_current_rejections_timeout(rejections_timeout.as_secs()); + counters.set_miner_current_rejections(rejections); } if block_status diff --git a/testnet/stacks-node/src/run_loop/neon.rs b/testnet/stacks-node/src/run_loop/neon.rs index 1d372f1051..3ef3e45ccb 100644 --- a/testnet/stacks-node/src/run_loop/neon.rs +++ b/testnet/stacks-node/src/run_loop/neon.rs @@ -139,12 +139,12 @@ impl Counters { fn inc(_ctr: &RunLoopCounter) {} #[cfg(test)] - pub fn set(ctr: &RunLoopCounter, value: u64) { + fn set(ctr: &RunLoopCounter, value: u64) { ctr.0.store(value, Ordering::SeqCst); } #[cfg(not(test))] - pub fn set(_ctr: &RunLoopCounter, _value: u64) {} + fn set(_ctr: &RunLoopCounter, _value: u64) {} pub fn bump_blocks_processed(&self) { Counters::inc(&self.blocks_processed); @@ -217,6 +217,14 @@ impl Counters { pub fn set_microblocks_processed(&self, value: u64) { Counters::set(&self.microblocks_processed, value) } + + pub fn set_miner_current_rejections_timeout(&self, value: u64) { + Counters::set(&self.naka_miner_current_rejections_timeout_secs, value) + } + + pub fn set_miner_current_rejections(&self, value: u64) { + Counters::set(&self.naka_miner_current_rejections, value) + } } /// Coordinating a node running in neon mode. From 651018450831970647cb4aa8dfe1fba0162f6740 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Fri, 31 Jan 2025 13:11:09 -0500 Subject: [PATCH 48/59] chore: formatting --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 92211cbe8f..84516a1eac 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -14,7 +14,7 @@ and this project adheres to the versioning scheme outlined in the [README.md](RE `StackerDB` messages, it logs `INFO` messages. Other interactions with the `stacks-node` behave normally (e.g., submitting validation requests, submitting finished blocks). A dry run signer will error out if the supplied key is actually a registered signer. 
-- Miner config option `tenure_extend_cost_threshold` to specify the percentage of the tenure budget that must be spent before a time-based tenure extend is attempted +- Add miner configuration option `tenure_extend_cost_threshold` to specify the percentage of the tenure budget that must be spent before a time-based tenure extend is attempted ### Changed From 870187b7114c685a1bc75cf8a0a2be31e285b964 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Fri, 31 Jan 2025 13:13:02 -0500 Subject: [PATCH 49/59] chore: move `dry_run` info to signer changelog --- CHANGELOG.md | 5 ----- stacks-signer/CHANGELOG.md | 10 ++++++++++ 2 files changed, 10 insertions(+), 5 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 84516a1eac..1644446dd7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,11 +9,6 @@ and this project adheres to the versioning scheme outlined in the [README.md](RE ### Added -- Add `dry_run` configuration option to `stacks-signer` config toml. Dry run mode will - run the signer binary as if it were a registered signer. Instead of broadcasting - `StackerDB` messages, it logs `INFO` messages. Other interactions with the `stacks-node` - behave normally (e.g., submitting validation requests, submitting finished blocks). A - dry run signer will error out if the supplied key is actually a registered signer. - Add miner configuration option `tenure_extend_cost_threshold` to specify the percentage of the tenure budget that must be spent before a time-based tenure extend is attempted ### Changed diff --git a/stacks-signer/CHANGELOG.md b/stacks-signer/CHANGELOG.md index 2697d93508..df30e0d0db 100644 --- a/stacks-signer/CHANGELOG.md +++ b/stacks-signer/CHANGELOG.md @@ -5,6 +5,16 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to the versioning scheme outlined in the [README.md](README.md). +## [Unreleased] + +### Added + +- Add `dry_run` configuration option to `stacks-signer` config toml. Dry run mode will + run the signer binary as if it were a registered signer. Instead of broadcasting + `StackerDB` messages, it logs `INFO` messages. Other interactions with the `stacks-node` + behave normally (e.g., submitting validation requests, submitting finished blocks). A + dry run signer will error out if the supplied key is actually a registered signer. 
+ ## [3.1.0.0.4.0] ## Added From fb2a70717b9544b542490cabd83927e96f201326 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Fri, 31 Jan 2025 12:30:08 -0600 Subject: [PATCH 50/59] test: fix reward-cycle flake in multiple_miners_empty_sortition --- testnet/stacks-node/src/tests/signer/v0.rs | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index fa4a6ec672..cf3b45656c 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -85,8 +85,9 @@ use crate::tests::nakamoto_integrations::{ POX_4_DEFAULT_STACKER_BALANCE, POX_4_DEFAULT_STACKER_STX_AMT, }; use crate::tests::neon_integrations::{ - get_account, get_chain_info, get_chain_info_opt, get_sortition_info, get_sortition_info_ch, - next_block_and_wait, run_until_burnchain_height, submit_tx, submit_tx_fallible, test_observer, + get_account, get_chain_info, get_chain_info_opt, get_pox_info, get_sortition_info, + get_sortition_info_ch, next_block_and_wait, run_until_burnchain_height, submit_tx, + submit_tx_fallible, test_observer, }; use crate::tests::{ self, gen_random_port, make_contract_call, make_contract_publish, make_stacks_transfer, @@ -11729,6 +11730,15 @@ fn multiple_miners_empty_sortition() { let last_active_sortition = get_sortition_info(&conf); assert!(last_active_sortition.was_sortition); + // check if we're about to cross a reward cycle boundary -- if so, we can't + // perform this test, because we can't tenure extend across the boundary + let pox_info = get_pox_info(&conf.node.data_url).unwrap(); + let blocks_until_next_cycle = pox_info.next_cycle.blocks_until_reward_phase; + if blocks_until_next_cycle == 1 { + info!("We're about to cross a reward cycle boundary, cannot perform a tenure extend here!"); + continue; + } + // lets mine a btc flash block let rl2_commits_before = rl2_commits.load(Ordering::SeqCst); let rl1_commits_before = rl1_commits.load(Ordering::SeqCst); From eb8e944862f0b876203cb5983725073edd41a1e5 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Fri, 31 Jan 2025 13:42:27 -0600 Subject: [PATCH 51/59] test: fix flake in test_tenure_extend_from_flashblocks, bump default test retry sleep from 100ms to 500ms (less log spam in tests) --- .../src/tests/nakamoto_integrations.rs | 83 ++++--------------- 1 file changed, 16 insertions(+), 67 deletions(-) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index f426995595..1ae369a17e 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -693,7 +693,7 @@ where error!("Timed out waiting for check to process"); return Err("Timed out".into()); } - thread::sleep(Duration::from_millis(100)); + thread::sleep(Duration::from_millis(500)); } Ok(()) } @@ -10764,8 +10764,6 @@ fn test_tenure_extend_from_flashblocks() { let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); let btc_regtest_controller = &mut signer_test.running_nodes.btc_regtest_controller; let coord_channel = signer_test.running_nodes.coord_channel.clone(); - let commits_submitted = signer_test.running_nodes.commits_submitted.clone(); - let sortitions_processed = signer_test.running_nodes.sortitions_processed.clone(); let counters = signer_test.running_nodes.counters.clone(); let nakamoto_test_skip_commit_op = signer_test .running_nodes @@ -10818,17 +10816,9 @@ fn 
test_tenure_extend_from_flashblocks() { ); submit_tx(&http_origin, &contract_tx); - let blocks_processed_before = coord_channel - .lock() - .expect("Mutex poisoned") - .get_stacks_blocks_processed(); - wait_for(120, || { - let blocks_processed = coord_channel - .lock() - .expect("Mutex poisoned") - .get_stacks_blocks_processed(); - Ok(blocks_processed > blocks_processed_before) + let sender_nonce = get_account(&naka_conf.node.data_url, &deployer_addr).nonce; + Ok(sender_nonce > 0) }) .expect("Timed out waiting for interim blocks to be mined"); @@ -10836,39 +10826,23 @@ fn test_tenure_extend_from_flashblocks() { // stall miner and relayer - // make tenure but don't wait for a stacks block - next_block_and_commits_only(btc_regtest_controller, 60, &naka_conf, &counters).unwrap(); + // make tenure + next_block_and_mine_commit(btc_regtest_controller, 60, &naka_conf, &counters).unwrap(); - // prevent the mienr from sending another block-commit + // prevent the miner from sending another block-commit nakamoto_test_skip_commit_op.set(true); - // make sure we get a block-found tenure change - let blocks_processed_before = coord_channel - .lock() - .expect("Mutex poisoned") - .get_stacks_blocks_processed(); - - // make sure the relayer processes both sortitions - let sortitions_processed_before = sortitions_processed.load(Ordering::SeqCst); + let info_before = get_chain_info(&naka_conf); // mine another Bitcoin block right away, since it will contain a block-commit btc_regtest_controller.bootstrap_chain(1); - wait_for(60, || { - sleep_ms(100); - let sortitions_cnt = sortitions_processed.load(Ordering::SeqCst); - Ok(sortitions_cnt > sortitions_processed_before) - }) - .unwrap(); - wait_for(120, || { - let blocks_processed = coord_channel - .lock() - .expect("Mutex poisoned") - .get_stacks_blocks_processed(); - Ok(blocks_processed > blocks_processed_before) + let info = get_chain_info(&naka_conf); + Ok(info.burn_block_height > info_before.burn_block_height + && info.stacks_tip_height > info_before.stacks_tip_height) }) - .expect("Timed out waiting for interim blocks to be mined"); + .unwrap(); let (canonical_stacks_tip_ch, _) = SortitionDB::get_canonical_stacks_chain_tip_hash(sortdb.conn()).unwrap(); @@ -10895,11 +10869,9 @@ fn test_tenure_extend_from_flashblocks() { // Given the above, this will be an `Extend` tenure. 
TEST_MINER_THREAD_STALL.set(false); - let sortitions_processed_before = sortitions_processed.load(Ordering::SeqCst); wait_for(60, || { - sleep_ms(100); - let sortitions_cnt = sortitions_processed.load(Ordering::SeqCst); - Ok(sortitions_cnt > sortitions_processed_before) + let cur_sort_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + Ok(cur_sort_tip.block_height > sort_tip.block_height) }) .unwrap(); @@ -10977,7 +10949,6 @@ fn test_tenure_extend_from_flashblocks() { // wait for the miner directive to be processed wait_for(60, || { - sleep_ms(30_000); let directives_cnt = nakamoto_miner_directives.load(Ordering::SeqCst); Ok(directives_cnt > miner_directives_before) }) @@ -10985,7 +10956,7 @@ fn test_tenure_extend_from_flashblocks() { // wait for all of the aforementioned transactions to get mined wait_for(120, || { - // fill mempool with transactions that depend on the burn view + // check account nonces from the sent transactions for (sender_sk, account_before) in account_keys.iter().zip(accounts_before.iter()) { let sender_addr = tests::to_addr(sender_sk); let account = loop { @@ -11042,28 +11013,7 @@ fn test_tenure_extend_from_flashblocks() { } // mine one additional tenure, to verify that we're on track - let commits_before = commits_submitted.load(Ordering::SeqCst); - let node_info_before = get_chain_info_opt(&naka_conf).unwrap(); - - btc_regtest_controller.bootstrap_chain(1); - - wait_for(20, || { - Ok(commits_submitted.load(Ordering::SeqCst) > commits_before) - }) - .unwrap(); - - // there was a sortition winner - let sort_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); - assert!(sort_tip.sortition); - - wait_for(20, || { - let node_info = get_chain_info_opt(&naka_conf).unwrap(); - Ok( - node_info.burn_block_height > node_info_before.burn_block_height - && node_info.stacks_tip_height > node_info_before.stacks_tip_height, - ) - }) - .unwrap(); + next_block_and_mine_commit(btc_regtest_controller, 60, &naka_conf, &counters).unwrap(); // boot a follower. it should reach the chain tip info!("----- BEGIN FOLLOWR BOOTUP ------"); @@ -11118,9 +11068,8 @@ fn test_tenure_extend_from_flashblocks() { debug!("Booted follower-thread"); - let miner_info = get_chain_info_result(&naka_conf).unwrap(); - wait_for(300, || { + let miner_info = get_chain_info_result(&naka_conf).unwrap(); let Ok(info) = get_chain_info_result(&follower_conf) else { sleep_ms(1000); return Ok(false); From 6fccd18d9dfeccf1035d1b48a66062e4b57f3132 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Fri, 31 Jan 2025 15:07:07 -0500 Subject: [PATCH 52/59] chore: add comments requested in code review Also add one more assertion to the test. --- stackslib/src/chainstate/nakamoto/miner.rs | 7 +++++ stackslib/src/config/mod.rs | 30 ++++++++++++++++++++-- stackslib/src/net/connection.rs | 1 + testnet/stacks-node/src/tests/signer/v0.rs | 5 ++++ 4 files changed, 41 insertions(+), 2 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/miner.rs b/stackslib/src/chainstate/nakamoto/miner.rs index 0156adbc96..d9ad1319f7 100644 --- a/stackslib/src/chainstate/nakamoto/miner.rs +++ b/stackslib/src/chainstate/nakamoto/miner.rs @@ -150,11 +150,18 @@ pub struct MinerTenureInfo<'a> { pub tenure_block_commit_opt: Option, } +/// Structure returned from `NakamotoBlockBuilder::build_nakamoto_block` with +/// information about the block that was built. 
pub struct BlockMetadata { + /// The block that was built pub block: NakamotoBlock, + /// The execution cost consumed so far by the current tenure pub tenure_consumed: ExecutionCost, + /// The cost budget for the current tenure pub tenure_budget: ExecutionCost, + /// The size of the blocks in the current tenure in bytes pub tenure_size: u64, + /// The events emitted by the transactions included in this block pub tx_events: Vec, } diff --git a/stackslib/src/config/mod.rs b/stackslib/src/config/mod.rs index 3684b16a32..2e494cacef 100644 --- a/stackslib/src/config/mod.rs +++ b/stackslib/src/config/mod.rs @@ -86,18 +86,40 @@ pub const OP_TX_ANY_ESTIM_SIZE: u64 = fmax!( OP_TX_VOTE_AGG_ESTIM_SIZE ); +/// Default maximum percentage of `satoshis_per_byte` that a Bitcoin fee rate +/// may be increased to when RBFing a transaction const DEFAULT_MAX_RBF_RATE: u64 = 150; // 1.5x +/// Amount to increment the fee by, in Sats/vByte, when RBFing a Bitcoin +/// transaction const DEFAULT_RBF_FEE_RATE_INCREMENT: u64 = 5; +/// Default number of reward cycles of blocks to sync in a non-full inventory +/// sync const INV_REWARD_CYCLES_TESTNET: u64 = 6; +/// Default minimum time to wait between mining blocks in milliseconds. The +/// value must be greater than or equal to 1000 ms because if a block is mined +/// within the same second as its parent, it will be rejected by the signers. const DEFAULT_MIN_TIME_BETWEEN_BLOCKS_MS: u64 = 1_000; +/// Default time in milliseconds to pause after receiving the first threshold +/// rejection, before proposing a new block. const DEFAULT_FIRST_REJECTION_PAUSE_MS: u64 = 5_000; +/// Default time in milliseconds to pause after receiving subsequent threshold +/// rejections, before proposing a new block. const DEFAULT_SUBSEQUENT_REJECTION_PAUSE_MS: u64 = 10_000; +/// Default time in milliseconds to wait for a Nakamoto block after seeing a +/// burnchain block before submitting a block commit. const DEFAULT_BLOCK_COMMIT_DELAY_MS: u64 = 20_000; +/// Default percentage of the remaining tenure cost limit to consume each block const DEFAULT_TENURE_COST_LIMIT_PER_BLOCK_PERCENTAGE: u8 = 25; +/// Default number of seconds to wait in-between polling the sortition DB to +/// see if we need to extend the ongoing tenure (e.g. because the current +/// sortition is empty or invalid). const DEFAULT_TENURE_EXTEND_POLL_SECS: u64 = 1; - -// This should be greater than the signers' timeout. This is used for issuing fallback tenure extends +/// Default duration to wait before attempting to issue a tenure extend. +/// This should be greater than the signers' timeout. 
This is used for issuing +/// fallback tenure extends const DEFAULT_TENURE_TIMEOUT_SECS: u64 = 180; +/// Default percentage of block budget that must be used before attempting a +/// time-based tenure extend const DEFAULT_TENURE_EXTEND_COST_THRESHOLD: u64 = 50; static HELIUM_DEFAULT_CONNECTION_OPTIONS: LazyLock = @@ -1192,9 +1214,13 @@ pub struct BurnchainConfig { pub process_exit_at_block_height: Option, pub poll_time_secs: u64, pub satoshis_per_byte: u64, + /// Maximum percentage of `satoshis_per_byte` that a Bitcoin fee rate may + /// be increased to when RBFing a transaction pub max_rbf: u64, pub leader_key_tx_estimated_size: u64, pub block_commit_tx_estimated_size: u64, + /// Amount to increment the fee by, in Sats/vByte, when RBFing a Bitcoin + /// transaction pub rbf_fee_increment: u64, pub first_burn_block_height: Option, pub first_burn_block_timestamp: Option, diff --git a/stackslib/src/net/connection.rs b/stackslib/src/net/connection.rs index 09465721ba..1204bad7fd 100644 --- a/stackslib/src/net/connection.rs +++ b/stackslib/src/net/connection.rs @@ -379,6 +379,7 @@ pub struct ConnectionOptions { /// Units are milliseconds. A value of 0 means "never". pub log_neighbors_freq: u64, pub inv_sync_interval: u64, + // how many reward cycles of blocks to sync in a non-full inventory sync pub inv_reward_cycles: u64, pub download_interval: u64, pub pingback_timeout: u64, diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 62391f14af..63154a9a57 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -12683,6 +12683,11 @@ fn tenure_extend_cost_threshold() { }) .expect("Contract not included in block"); + // Ensure the tenure was not extended in that block + assert!(!last_block_contains_tenure_change_tx( + TenureChangeCause::Extended + )); + // Now, lets call the contract a bunch of times to increase the tenure cost for _ in 0..num_txs { let call_tx = make_contract_call( From 6041be6784e0b3576370428a265dab9521d54be9 Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Fri, 31 Jan 2025 16:04:03 -0500 Subject: [PATCH 53/59] fix: Disable flaky test `flash_blocks_on_epoch_3` for real this time --- .github/workflows/bitcoin-tests.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index 57b37f44e7..2f1ea4f219 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -84,7 +84,7 @@ jobs: - tests::should_succeed_handling_malformed_and_valid_txs - tests::nakamoto_integrations::simple_neon_integration # Disable this flaky test. 
We don't need continue testing Epoch 2 -> 3 transition - - tests::nakamoto_integrations::flash_blocks_on_epoch_3_FLAKY + # - tests::nakamoto_integrations::flash_blocks_on_epoch_3_FLAKY - tests::nakamoto_integrations::mine_multiple_per_tenure_integration - tests::nakamoto_integrations::block_proposal_api_endpoint - tests::nakamoto_integrations::miner_writes_proposed_block_to_stackerdb From 8d2bb0d80db3210a44730d3252ffe0197a2e8f37 Mon Sep 17 00:00:00 2001 From: Roberto De Ioris Date: Sat, 1 Feb 2025 10:54:30 +0100 Subject: [PATCH 54/59] refactored block_validation_check_rejection_timeout_heuristic test --- testnet/stacks-node/src/tests/signer/v0.rs | 183 ++++----------------- 1 file changed, 33 insertions(+), 150 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index e055ad82f2..26f95d9ce0 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -7947,166 +7947,49 @@ fn block_validation_check_rejection_timeout_heuristic() { // note we just use mined nakamoto_blocks as the second block is not going to be confirmed - info!("------------------------- Check Rejections-based timeout with 1 rejection -------------------------"); + let mut test_rejections = |signer_split_index: usize, expected_timeout: u64| { + let blocks_before = test_observer::get_mined_nakamoto_blocks().len(); + let (ignore_signers, reject_signers) = all_signers.split_at(signer_split_index); - let blocks_before = test_observer::get_mined_nakamoto_blocks().len(); + info!("------------------------- Check Rejections-based timeout with {} rejections -------------------------", reject_signers.len()); - TEST_REJECT_ALL_BLOCK_PROPOSAL.set(vec![all_signers[19]]); - TEST_IGNORE_ALL_BLOCK_PROPOSALS.set(all_signers[0..19].to_vec()); + TEST_REJECT_ALL_BLOCK_PROPOSAL.set(reject_signers.to_vec()); + TEST_IGNORE_ALL_BLOCK_PROPOSALS.set(ignore_signers.to_vec()); - next_block_and( - &mut signer_test.running_nodes.btc_regtest_controller, - 30, - || Ok(test_observer::get_mined_nakamoto_blocks().len() > blocks_before), - ) - .unwrap(); - - signer_test - .wait_for_block_rejections(timeout.as_secs(), &[all_signers[19]]) - .unwrap(); - - wait_for(60, || { - Ok(signer_test - .running_nodes - .counters - .naka_miner_current_rejections - .get() - >= 1) - }) - .unwrap(); - assert_eq!( - signer_test - .running_nodes - .counters - .naka_miner_current_rejections_timeout_secs - .get(), - 123 - ); - - info!("------------------------- Check Rejections-based timeout with 2 rejections -------------------------"); - - let blocks_before = test_observer::get_mined_nakamoto_blocks().len(); - - TEST_REJECT_ALL_BLOCK_PROPOSAL.set(vec![all_signers[18], all_signers[19]]); - TEST_IGNORE_ALL_BLOCK_PROPOSALS.set(all_signers[0..18].to_vec()); - - next_block_and( - &mut signer_test.running_nodes.btc_regtest_controller, - 30, - || Ok(test_observer::get_mined_nakamoto_blocks().len() > blocks_before), - ) - .unwrap(); - - signer_test - .wait_for_block_rejections(timeout.as_secs(), &[all_signers[18], all_signers[19]]) - .unwrap(); - - wait_for(60, || { - Ok(signer_test - .running_nodes - .counters - .naka_miner_current_rejections - .get() - >= 2) - }) - .unwrap(); - assert_eq!( - signer_test - .running_nodes - .counters - .naka_miner_current_rejections_timeout_secs - .get(), - 20 - ); - - info!("------------------------- Check Rejections-based timeout with 3 rejections -------------------------"); - - let blocks_before = 
test_observer::get_mined_nakamoto_blocks().len(); - - TEST_REJECT_ALL_BLOCK_PROPOSAL.set(vec![all_signers[17], all_signers[18], all_signers[19]]); - TEST_IGNORE_ALL_BLOCK_PROPOSALS.set(all_signers[0..17].to_vec()); - - next_block_and( - &mut signer_test.running_nodes.btc_regtest_controller, - 30, - || Ok(test_observer::get_mined_nakamoto_blocks().len() > blocks_before), - ) - .unwrap(); - - signer_test - .wait_for_block_rejections( - timeout.as_secs(), - &[all_signers[17], all_signers[18], all_signers[19]], + next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + 30, + || Ok(test_observer::get_mined_nakamoto_blocks().len() > blocks_before), ) .unwrap(); - wait_for(60, || { - Ok(signer_test - .running_nodes - .counters - .naka_miner_current_rejections - .get() - >= 3) - }) - .unwrap(); - - assert_eq!( signer_test - .running_nodes - .counters - .naka_miner_current_rejections_timeout_secs - .get(), - 10 - ); - - info!("------------------------- Check Rejections-based timeout with 4 rejections -------------------------"); - - let blocks_before = test_observer::get_mined_nakamoto_blocks().len(); - - TEST_REJECT_ALL_BLOCK_PROPOSAL.set(vec![ - all_signers[16], - all_signers[17], - all_signers[18], - all_signers[19], - ]); - TEST_IGNORE_ALL_BLOCK_PROPOSALS.set(all_signers[0..16].to_vec()); - - next_block_and( - &mut signer_test.running_nodes.btc_regtest_controller, - 30, - || Ok(test_observer::get_mined_nakamoto_blocks().len() > blocks_before), - ) - .unwrap(); + .wait_for_block_rejections(timeout.as_secs(), &reject_signers) + .unwrap(); - signer_test - .wait_for_block_rejections( - timeout.as_secs(), - &[ - all_signers[16], - all_signers[17], - all_signers[18], - all_signers[19], - ], - ) + wait_for(60, || { + Ok(signer_test + .running_nodes + .counters + .naka_miner_current_rejections + .get() + >= reject_signers.len() as u64) + }) .unwrap(); + assert_eq!( + signer_test + .running_nodes + .counters + .naka_miner_current_rejections_timeout_secs + .get(), + expected_timeout + ); + }; - wait_for(60, || { - Ok(signer_test - .running_nodes - .counters - .naka_miner_current_rejections - .get() - >= 4) - }) - .unwrap(); - assert_eq!( - signer_test - .running_nodes - .counters - .naka_miner_current_rejections_timeout_secs - .get(), - 99 - ); + test_rejections(19, 123); + test_rejections(18, 20); + test_rejections(17, 10); + test_rejections(16, 99); // reset reject/ignore TEST_REJECT_ALL_BLOCK_PROPOSAL.set(vec![]); From 151c844734122174e75a0f801d2d0f28c4085a08 Mon Sep 17 00:00:00 2001 From: Roberto De Ioris Date: Sat, 1 Feb 2025 11:00:18 +0100 Subject: [PATCH 55/59] use From based cast for u32->u64 --- testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs index 97b22ef68e..ff08633ac6 100644 --- a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs @@ -334,7 +334,7 @@ impl SignerCoordinator { return false; } // number or rejections changed? - if status.total_reject_weight as u64 != rejections { + if u64::from(status.total_reject_weight) != rejections { return false; } // enough signatures? 
@@ -388,8 +388,8 @@ impl SignerCoordinator { } }; - if rejections != block_status.total_reject_weight as u64 { - rejections = block_status.total_reject_weight as u64; + if rejections != u64::from(block_status.total_reject_weight) { + rejections = u64::from(block_status.total_reject_weight); let (rejections_step, new_rejections_timeout) = self .block_rejection_timeout_steps .range((Included(0), Included(rejections))) From f66c8944082a0040b3bb399a9f676a9f07497dff Mon Sep 17 00:00:00 2001 From: Roberto De Ioris Date: Sat, 1 Feb 2025 11:08:20 +0100 Subject: [PATCH 56/59] removed useless line --- testnet/stacks-node/src/tests/signer/mod.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/testnet/stacks-node/src/tests/signer/mod.rs b/testnet/stacks-node/src/tests/signer/mod.rs index 5f41ff816a..6b355fe5aa 100644 --- a/testnet/stacks-node/src/tests/signer/mod.rs +++ b/testnet/stacks-node/src/tests/signer/mod.rs @@ -936,7 +936,6 @@ fn setup_stx_btc_node( let counters = run_loop.counters(); let coord_channel = run_loop.coordinator_channels(); - let run_loop_thread = thread::spawn(move || run_loop.start(None, 0)); // Give the run loop some time to start up! From 400c806aaa812d4565b94d57be60527040fb31e0 Mon Sep 17 00:00:00 2001 From: Roberto De Ioris Date: Mon, 3 Feb 2025 08:26:54 +0100 Subject: [PATCH 57/59] use u32 for rejections counter --- .../src/nakamoto_node/signer_coordinator.rs | 16 ++++++++-------- testnet/stacks-node/src/run_loop/neon.rs | 6 +++--- 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs index ff08633ac6..2138b7e767 100644 --- a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs @@ -70,7 +70,7 @@ pub struct SignerCoordinator { /// burn block has arrived since this thread started. burn_tip_at_start: ConsensusHash, /// The timeout configuration based on the percentage of rejections - block_rejection_timeout_steps: BTreeMap, + block_rejection_timeout_steps: BTreeMap, } impl SignerCoordinator { @@ -107,10 +107,10 @@ impl SignerCoordinator { let miners_session = StackerDBSession::new(&rpc_socket.to_string(), miners_contract_id); // build a BTreeMap of the various timeout steps - let mut block_rejection_timeout_steps = BTreeMap::::new(); + let mut block_rejection_timeout_steps = BTreeMap::::new(); for (percentage, duration) in config.miner.block_rejection_timeout_steps.iter() { let rejections_amount = - ((f64::from(listener.total_weight) / 100.0) * f64::from(*percentage)) as u64; + ((f64::from(listener.total_weight) / 100.0) * f64::from(*percentage)) as u32; block_rejection_timeout_steps.insert(rejections_amount, *duration); } @@ -308,7 +308,7 @@ impl SignerCoordinator { counters: &Counters, ) -> Result, NakamotoNodeError> { // the amount of current rejections (used to eventually modify the timeout) - let mut rejections: u64 = 0; + let mut rejections: u32 = 0; // default timeout (the 0 entry must be always present) let mut rejections_timeout = self .block_rejection_timeout_steps @@ -334,7 +334,7 @@ impl SignerCoordinator { return false; } // number or rejections changed? - if u64::from(status.total_reject_weight) != rejections { + if status.total_reject_weight != rejections { return false; } // enough signatures? 
@@ -388,8 +388,8 @@ impl SignerCoordinator { } }; - if rejections != u64::from(block_status.total_reject_weight) { - rejections = u64::from(block_status.total_reject_weight); + if rejections != block_status.total_reject_weight { + rejections = block_status.total_reject_weight; let (rejections_step, new_rejections_timeout) = self .block_rejection_timeout_steps .range((Included(0), Included(rejections))) @@ -406,7 +406,7 @@ impl SignerCoordinator { "rejections_step" => rejections_step, "rejections_threshold" => self.total_weight.saturating_sub(self.weight_threshold)); - counters.set_miner_current_rejections_timeout(rejections_timeout.as_secs()); + counters.set_miner_current_rejections_timeout_secs(rejections_timeout.as_secs()); counters.set_miner_current_rejections(rejections); } diff --git a/testnet/stacks-node/src/run_loop/neon.rs b/testnet/stacks-node/src/run_loop/neon.rs index 3ef3e45ccb..299335f35f 100644 --- a/testnet/stacks-node/src/run_loop/neon.rs +++ b/testnet/stacks-node/src/run_loop/neon.rs @@ -218,12 +218,12 @@ impl Counters { Counters::set(&self.microblocks_processed, value) } - pub fn set_miner_current_rejections_timeout(&self, value: u64) { + pub fn set_miner_current_rejections_timeout_secs(&self, value: u64) { Counters::set(&self.naka_miner_current_rejections_timeout_secs, value) } - pub fn set_miner_current_rejections(&self, value: u64) { - Counters::set(&self.naka_miner_current_rejections, value) + pub fn set_miner_current_rejections(&self, value: u32) { + Counters::set(&self.naka_miner_current_rejections, u64::from(value)) } } From 676506354ec8d0f496fc7e14c4d824013615918d Mon Sep 17 00:00:00 2001 From: wileyj <2847772+wileyj@users.noreply.github.com> Date: Mon, 3 Feb 2025 13:57:28 -0800 Subject: [PATCH 58/59] update changelog for 3.1.0.0.5 --- CHANGELOG.md | 6 ++++++ stacks-signer/CHANGELOG.md | 28 +++++++++++++++++----------- 2 files changed, 23 insertions(+), 11 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 64f0bc2164..b9bbfca0a4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,6 +9,12 @@ and this project adheres to the versioning scheme outlined in the [README.md](RE ### Added +### Changed + +### Fixed + +## [3.1.0.0.5] + - Add miner configuration option `tenure_extend_cost_threshold` to specify the percentage of the tenure budget that must be spent before a time-based tenure extend is attempted ### Changed diff --git a/stacks-signer/CHANGELOG.md b/stacks-signer/CHANGELOG.md index df30e0d0db..2e801d680d 100644 --- a/stacks-signer/CHANGELOG.md +++ b/stacks-signer/CHANGELOG.md @@ -9,6 +9,12 @@ and this project adheres to the versioning scheme outlined in the [README.md](RE ### Added +### Changed + +## [3.1.0.0.5.0] + +### Added + - Add `dry_run` configuration option to `stacks-signer` config toml. Dry run mode will run the signer binary as if it were a registered signer. Instead of broadcasting `StackerDB` messages, it logs `INFO` messages. Other interactions with the `stacks-node` @@ -17,44 +23,44 @@ and this project adheres to the versioning scheme outlined in the [README.md](RE ## [3.1.0.0.4.0] -## Added +### Added - When a new block proposal is received while the signer is waiting for an existing proposal to be validated, the signer will wait until the existing block is done validating before submitting the new one for validating. 
([#5453](https://github.com/stacks-network/stacks-core/pull/5453)) - Introduced two new prometheus metrics: - `stacks_signer_block_validation_latencies_histogram`: the validation_time_ms reported by the node when validating a block proposal - `stacks_signer_block_response_latencies_histogram`: the "end-to-end" time it takes for the signer to issue a block response -## Changed +### Changed ## [3.1.0.0.3.0] -## Added +### Added - Introduced the `block_proposal_max_age_secs` configuration option for signers, enabling them to automatically ignore block proposals that exceed the specified age in seconds. -## Changed +### Changed - Improvements to the stale signer cleanup logic: deletes the prior signer if it has no remaining unprocessed blocks in its database - Signers now listen to new block events from the stacks node to determine whether a block has been successfully appended to the chain tip -# [3.1.0.0.2.1] +## [3.1.0.0.2.1] -## Added +### Added -## Changed +### Changed - Prevent old reward cycle signers from processing block validation response messages that do not apply to blocks from their cycle. -# [3.1.0.0.2.1] +## [3.1.0.0.2.1] -## Added +### Added -## Changed +### Changed - Prevent old reward cycle signers from processing block validation response messages that do not apply to blocks from their cycle. ## [3.1.0.0.2.0] -## Added +### Added - **SIP-029 consensus rules, activating in epoch 3.1 at block 875,000** (see [SIP-029](https://github.com/will-corcoran/sips/blob/feat/sip-029-halving-alignment/sips/sip-029/sip-029-halving-alignment.md) for details) From e9e7af54ec296440faf07fa5c75384cbe9d0905b Mon Sep 17 00:00:00 2001 From: wileyj <2847772+wileyj@users.noreply.github.com> Date: Mon, 3 Feb 2025 14:37:31 -0800 Subject: [PATCH 59/59] Update CHANGELOG.md Co-authored-by: Brice --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index b9bbfca0a4..1f7fce479b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -15,6 +15,8 @@ and this project adheres to the versioning scheme outlined in the [README.md](RE ## [3.1.0.0.5] +### Added + - Add miner configuration option `tenure_extend_cost_threshold` to specify the percentage of the tenure budget that must be spent before a time-based tenure extend is attempted ### Changed
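For reference, the rejection-timeout heuristic that patches 45, 54, 55 and 57 above converge on (a `BTreeMap<u32, Duration>` keyed by absolute rejection weight, queried with a `range(..=rejections).last()` lookup) can be exercised in isolation. A self-contained sketch, with invented step values; real deployments take them from `miner.block_rejection_timeout_steps`:

```rust
use std::collections::BTreeMap;
use std::ops::Bound::Included;
use std::time::Duration;

fn main() {
    let total_weight: u32 = 20;

    // (percentage of total signer weight rejecting, timeout) pairs, as they
    // might appear in the miner configuration. The 0% entry must always be
    // present; it is the default timeout before any rejection is seen.
    let configured_steps: [(u32, Duration); 4] = [
        (0, Duration::from_secs(600)),
        (10, Duration::from_secs(300)),
        (20, Duration::from_secs(150)),
        (30, Duration::from_secs(60)),
    ];

    // Convert percentages into absolute rejection weights, mirroring the
    // conversion done when the SignerCoordinator is constructed.
    let mut steps: BTreeMap<u32, Duration> = BTreeMap::new();
    for (percentage, duration) in configured_steps {
        let rejections_amount =
            ((f64::from(total_weight) / 100.0) * f64::from(percentage)) as u32;
        steps.insert(rejections_amount, duration);
    }

    // Look up the timeout for the current rejection weight: the last step
    // whose key is <= the observed number of rejections.
    let rejections: u32 = 5;
    let (step, timeout) = steps
        .range((Included(0), Included(rejections)))
        .last()
        .expect("step 0 must always be present");
    // With these example steps: 5 rejections out of 20 -> step 4 -> 150s.
    println!("{rejections} rejections -> step {step}, timeout {timeout:?}");
}
```

Each time the observed rejection weight changes, the miner resets its wait timer and re-resolves the timeout from this table, so heavier rejection gives up on the proposal sooner.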