From 8351ae292f7441c2d4f508a57c9f417fc3e8f32f Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Wed, 1 Jan 2025 22:56:04 -0500 Subject: [PATCH 01/82] chore(stacks-common): Remove unused imports and enable warning --- stacks-common/src/address/b58.rs | 2 +- stacks-common/src/address/mod.rs | 3 +-- stacks-common/src/bitvec.rs | 1 - .../src/deps_common/bitcoin/blockdata/block.rs | 1 - .../src/deps_common/bitcoin/blockdata/script.rs | 1 - .../src/deps_common/bitcoin/blockdata/transaction.rs | 4 +--- .../deps_common/bitcoin/network/message_network.rs | 2 -- stacks-common/src/deps_common/bitcoin/util/hash.rs | 2 +- stacks-common/src/deps_common/httparse/mod.rs | 4 +--- stacks-common/src/libcommon.rs | 4 +--- stacks-common/src/types/chainstate.rs | 12 +++++------- stacks-common/src/types/mod.rs | 1 - stacks-common/src/util/chunked_encoding.rs | 3 ++- stacks-common/src/util/db.rs | 2 +- stacks-common/src/util/hash.rs | 7 ++----- stacks-common/src/util/log.rs | 3 +-- stacks-common/src/util/mod.rs | 1 - stacks-common/src/util/pipe.rs | 4 ---- stacks-common/src/util/retry.rs | 6 +----- stacks-common/src/util/secp256k1.rs | 6 ++---- stacks-common/src/util/vrf.rs | 7 ------- 21 files changed, 20 insertions(+), 56 deletions(-) diff --git a/stacks-common/src/address/b58.rs b/stacks-common/src/address/b58.rs index 6a135392e5..ffba441de6 100644 --- a/stacks-common/src/address/b58.rs +++ b/stacks-common/src/address/b58.rs @@ -14,7 +14,7 @@ //! 
Base58 encoder and decoder -use std::{error, fmt, str}; +use std::{fmt, str}; use crate::address::Error; use crate::util::hash::DoubleSha256; diff --git a/stacks-common/src/address/mod.rs b/stacks-common/src/address/mod.rs index 381456f661..8377d0087d 100644 --- a/stacks-common/src/address/mod.rs +++ b/stacks-common/src/address/mod.rs @@ -19,7 +19,7 @@ use std::{error, fmt}; use sha2::{Digest, Sha256}; use crate::deps_common::bitcoin::blockdata::opcodes::All as btc_opcodes; -use crate::deps_common::bitcoin::blockdata::script::{Builder, Instruction, Script}; +use crate::deps_common::bitcoin::blockdata::script::Builder; use crate::types::PublicKey; use crate::util::hash::Hash160; @@ -220,7 +220,6 @@ pub fn public_keys_to_address_hash( mod test { use super::*; use crate::util::hash::*; - use crate::util::log; use crate::util::secp256k1::Secp256k1PublicKey as PubKey; struct PubkeyFixture { diff --git a/stacks-common/src/bitvec.rs b/stacks-common/src/bitvec.rs index 7c77e5da32..065dd5e814 100644 --- a/stacks-common/src/bitvec.rs +++ b/stacks-common/src/bitvec.rs @@ -260,7 +260,6 @@ mod test { use super::BitVec; use crate::codec::StacksMessageCodec; - use crate::util::hash::to_hex; fn check_set_get(mut input: BitVec<{ u16::MAX }>) { let original_input = input.clone(); diff --git a/stacks-common/src/deps_common/bitcoin/blockdata/block.rs b/stacks-common/src/deps_common/bitcoin/blockdata/block.rs index af064511b5..9a797fd846 100644 --- a/stacks-common/src/deps_common/bitcoin/blockdata/block.rs +++ b/stacks-common/src/deps_common/bitcoin/blockdata/block.rs @@ -25,7 +25,6 @@ use crate::deps_common::bitcoin::blockdata::transaction::Transaction; use crate::deps_common::bitcoin::network::constants::Network; use crate::deps_common::bitcoin::network::encodable::VarInt; use crate::deps_common::bitcoin::network::serialize::BitcoinHash; -use crate::deps_common::bitcoin::util; use crate::deps_common::bitcoin::util::hash::Sha256dHash; use crate::deps_common::bitcoin::util::Error; use 
crate::deps_common::bitcoin::util::Error::{SpvBadProofOfWork, SpvBadTarget}; diff --git a/stacks-common/src/deps_common/bitcoin/blockdata/script.rs b/stacks-common/src/deps_common/bitcoin/blockdata/script.rs index 34ee5897c3..cf0e3296b1 100644 --- a/stacks-common/src/deps_common/bitcoin/blockdata/script.rs +++ b/stacks-common/src/deps_common/bitcoin/blockdata/script.rs @@ -27,7 +27,6 @@ use std::mem::size_of; use std::{error, fmt}; -use serde; use sha2::{Digest, Sha256}; use crate::deps_common::bitcoin::blockdata::opcodes; diff --git a/stacks-common/src/deps_common/bitcoin/blockdata/transaction.rs b/stacks-common/src/deps_common/bitcoin/blockdata/transaction.rs index c2d4c4e0a2..6dbf49bd5d 100644 --- a/stacks-common/src/deps_common/bitcoin/blockdata/transaction.rs +++ b/stacks-common/src/deps_common/bitcoin/blockdata/transaction.rs @@ -34,7 +34,6 @@ use crate::deps_common::bitcoin::network::serialize::{ self, serialize, BitcoinHash, SimpleDecoder, SimpleEncoder, }; use crate::deps_common::bitcoin::util::hash::Sha256dHash; -use crate::util::hash::to_hex; /// A reference to a transaction output #[derive(Copy, Clone, Debug, Eq, Hash, PartialEq, PartialOrd, Ord)] @@ -675,7 +674,7 @@ impl SigHashType { #[cfg(test)] mod tests { - use super::{SigHashType, Transaction, TxIn}; + use super::{Transaction, TxIn}; use crate::deps_common; use crate::deps_common::bitcoin::blockdata::script::Script; use crate::deps_common::bitcoin::network::serialize::{deserialize, BitcoinHash}; @@ -690,7 +689,6 @@ mod tests { #[test] fn test_is_coinbase() { - use crate::deps_common::bitcoin::blockdata::constants; use crate::deps_common::bitcoin::network::constants::Network; let genesis = deps_common::bitcoin::blockdata::constants::genesis_block(Network::Bitcoin); diff --git a/stacks-common/src/deps_common/bitcoin/network/message_network.rs b/stacks-common/src/deps_common/bitcoin/network/message_network.rs index 0cf486ba85..a42eb47aea 100644 --- 
a/stacks-common/src/deps_common/bitcoin/network/message_network.rs +++ b/stacks-common/src/deps_common/bitcoin/network/message_network.rs @@ -19,8 +19,6 @@ //! use crate::deps_common::bitcoin::network::address::Address; -use crate::deps_common::bitcoin::network::constants; -use crate::util; // Some simple messages diff --git a/stacks-common/src/deps_common/bitcoin/util/hash.rs b/stacks-common/src/deps_common/bitcoin/util/hash.rs index e1a9455e99..abfce8349f 100644 --- a/stacks-common/src/deps_common/bitcoin/util/hash.rs +++ b/stacks-common/src/deps_common/bitcoin/util/hash.rs @@ -18,7 +18,7 @@ use std::char::from_digit; use std::cmp::min; use std::io::{Cursor, Write}; -use std::{error, fmt, mem}; +use std::{fmt, mem}; use ripemd::Ripemd160; #[cfg(feature = "serde")] diff --git a/stacks-common/src/deps_common/httparse/mod.rs b/stacks-common/src/deps_common/httparse/mod.rs index b4c9250546..364fe0f8a7 100644 --- a/stacks-common/src/deps_common/httparse/mod.rs +++ b/stacks-common/src/deps_common/httparse/mod.rs @@ -30,7 +30,7 @@ //! Originally written by Sean McArthur. //! //! Modified by Jude Nelson to remove all unsafe code. -use std::{error, fmt, mem, result, str}; +use std::{fmt, mem, result, str}; macro_rules! 
next { ($bytes:ident) => {{ @@ -1282,8 +1282,6 @@ mod tests { #[test] fn test_std_error() { - use std::error::Error as StdError; - use super::Error; let err = Error::HeaderName; assert_eq!(err.to_string(), err.description_str()); diff --git a/stacks-common/src/libcommon.rs b/stacks-common/src/libcommon.rs index 04c3acc1ea..34705bebda 100644 --- a/stacks-common/src/libcommon.rs +++ b/stacks-common/src/libcommon.rs @@ -1,5 +1,4 @@ #![allow(unused_macros)] -#![allow(unused_imports)] #![allow(dead_code)] #![allow(non_camel_case_types)] #![allow(non_snake_case)] @@ -7,6 +6,7 @@ #![cfg_attr(test, allow(unused_variables, unused_assignments))] #![allow(clippy::assertions_on_constants)] +#[allow(unused_imports)] #[macro_use(o, slog_log, slog_trace, slog_debug, slog_info, slog_warn, slog_error)] extern crate slog; @@ -33,8 +33,6 @@ pub mod deps_common; pub mod bitvec; -use crate::types::chainstate::{BlockHeaderHash, BurnchainHeaderHash, SortitionId, StacksBlockId}; - pub mod consts { use crate::types::chainstate::{BlockHeaderHash, ConsensusHash}; pub use crate::types::MINING_COMMITMENT_WINDOW; diff --git a/stacks-common/src/types/chainstate.rs b/stacks-common/src/types/chainstate.rs index 630ce70c9d..e07e37f27a 100644 --- a/stacks-common/src/types/chainstate.rs +++ b/stacks-common/src/types/chainstate.rs @@ -2,24 +2,22 @@ use std::fmt::{self, Display}; use std::io::{Read, Write}; use std::str::FromStr; -use curve25519_dalek::digest::Digest; -use rand::{Rng, SeedableRng}; -use serde::de::{Deserialize, Error as de_Error}; -use serde::ser::Error as ser_Error; use serde::Serialize; -use sha2::{Digest as Sha2Digest, Sha256, Sha512_256}; +use sha2::{Digest as Sha2Digest, Sha512_256}; use crate::codec::{read_next, write_next, Error as CodecError, StacksMessageCodec}; use crate::consts::{FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH}; use crate::deps_common::bitcoin::util::hash::Sha256dHash; -use crate::util::hash::{to_hex, DoubleSha256, Hash160, Sha512Trunc256Sum, 
HASH160_ENCODED_SIZE}; +use crate::util::hash::{Hash160, Sha512Trunc256Sum, HASH160_ENCODED_SIZE}; use crate::util::secp256k1::{MessageSignature, Secp256k1PrivateKey, Secp256k1PublicKey}; -use crate::util::uint::Uint256; use crate::util::vrf::{VRFProof, VRF_PROOF_ENCODED_SIZE}; pub type StacksPublicKey = Secp256k1PublicKey; pub type StacksPrivateKey = Secp256k1PrivateKey; +#[cfg(any(test, feature = "testing"))] +use crate::util::hash::DoubleSha256; + /// Hash of a Trie node. This is a SHA2-512/256. #[derive(Default)] pub struct TrieHash(pub [u8; 32]); diff --git a/stacks-common/src/types/mod.rs b/stacks-common/src/types/mod.rs index 93ebd17bc0..297d629254 100644 --- a/stacks-common/src/types/mod.rs +++ b/stacks-common/src/types/mod.rs @@ -30,7 +30,6 @@ use crate::address::{ C32_ADDRESS_VERSION_TESTNET_MULTISIG, C32_ADDRESS_VERSION_TESTNET_SINGLESIG, }; use crate::consts::MICROSTACKS_PER_STACKS; -use crate::deps_common::bitcoin::blockdata::transaction::TxOut; use crate::types::chainstate::{StacksAddress, StacksPublicKey}; use crate::util::hash::Hash160; use crate::util::secp256k1::{MessageSignature, Secp256k1PublicKey}; diff --git a/stacks-common/src/util/chunked_encoding.rs b/stacks-common/src/util/chunked_encoding.rs index 445ec5a831..328f94514f 100644 --- a/stacks-common/src/util/chunked_encoding.rs +++ b/stacks-common/src/util/chunked_encoding.rs @@ -445,9 +445,10 @@ impl Write for HttpChunkedTransferWriter<'_, '_, W> { } } +#[cfg(test)] mod test { use std::io; - use std::io::{Read, Write}; + use std::io::Read; use rand::RngCore; diff --git a/stacks-common/src/util/db.rs b/stacks-common/src/util/db.rs index 53564af597..3a463df4f8 100644 --- a/stacks-common/src/util/db.rs +++ b/stacks-common/src/util/db.rs @@ -17,7 +17,7 @@ use std::backtrace::Backtrace; use std::sync::{LazyLock, Mutex}; use std::thread; -use std::time::{Duration, Instant}; +use std::time::Instant; use hashbrown::HashMap; use rand::{thread_rng, Rng}; diff --git a/stacks-common/src/util/hash.rs 
b/stacks-common/src/util/hash.rs index 666e72c8e2..85f357d21a 100644 --- a/stacks-common/src/util/hash.rs +++ b/stacks-common/src/util/hash.rs @@ -20,7 +20,6 @@ use std::{fmt, mem}; use ripemd::Ripemd160; use serde::de::{Deserialize, Error as de_Error}; -use serde::ser::Error as ser_Error; use serde::Serialize; use sha2::{Digest, Sha256, Sha512, Sha512_256}; use sha3::Keccak256; @@ -29,7 +28,7 @@ use crate::types::StacksPublicKeyBuffer; use crate::util::pair::*; use crate::util::secp256k1::Secp256k1PublicKey; use crate::util::uint::Uint256; -use crate::util::{log, HexError}; +use crate::util::HexError; // hash function for Merkle trees pub trait MerkleHashFunc { @@ -659,9 +658,7 @@ pub fn bytes_to_hex(s: &[u8]) -> String { #[cfg(test)] mod test { - use super::{ - bin_bytes, hex_bytes, to_bin, DoubleSha256, MerkleHashFunc, MerklePath, MerkleTree, - }; + use super::{bin_bytes, hex_bytes, to_bin, DoubleSha256, MerkleHashFunc, MerkleTree}; struct MerkleTreeFixture { data: Vec>, diff --git a/stacks-common/src/util/log.rs b/stacks-common/src/util/log.rs index b0ac704f0c..1699aa6f54 100644 --- a/stacks-common/src/util/log.rs +++ b/stacks-common/src/util/log.rs @@ -15,13 +15,12 @@ // along with this program. If not, see . use std::io::Write; -use std::sync::Mutex; use std::time::{Duration, SystemTime}; use std::{env, io, thread}; use chrono::prelude::*; use lazy_static::lazy_static; -use slog::{BorrowedKV, Drain, FnValue, Level, Logger, OwnedKVList, Record, KV}; +use slog::{Drain, Level, Logger, OwnedKVList, Record, KV}; use slog_term::{CountingWriter, Decorator, RecordDecorator, Serializer}; lazy_static! 
{ diff --git a/stacks-common/src/util/mod.rs b/stacks-common/src/util/mod.rs index 8fc9499b2f..0e9ff49cca 100644 --- a/stacks-common/src/util/mod.rs +++ b/stacks-common/src/util/mod.rs @@ -28,7 +28,6 @@ pub mod secp256k1; pub mod uint; pub mod vrf; -use std::collections::HashMap; use std::fs::File; use std::io::{BufReader, BufWriter, Write}; use std::path::{Path, PathBuf}; diff --git a/stacks-common/src/util/pipe.rs b/stacks-common/src/util/pipe.rs index 86d92abd61..4407fee71f 100644 --- a/stacks-common/src/util/pipe.rs +++ b/stacks-common/src/util/pipe.rs @@ -21,8 +21,6 @@ use std::io; use std::io::{Read, Write}; use std::sync::mpsc::{sync_channel, Receiver, SyncSender, TryRecvError, TrySendError}; -use crate::util::log; - /// Inter-thread pipe for streaming messages, built on channels. /// Used mainly in conjunction with networking. /// @@ -316,7 +314,6 @@ impl Write for PipeWrite { #[cfg(test)] mod test { - use std::io::prelude::*; use std::io::{Read, Write}; use std::{io, thread}; @@ -324,7 +321,6 @@ mod test { use rand::RngCore; use super::*; - use crate::util::*; #[test] fn test_connection_pipe_oneshot() { diff --git a/stacks-common/src/util/retry.rs b/stacks-common/src/util/retry.rs index e7f6c0b140..47801289a3 100644 --- a/stacks-common/src/util/retry.rs +++ b/stacks-common/src/util/retry.rs @@ -18,11 +18,7 @@ */ use std::io; -use std::io::prelude::*; -use std::io::{Read, Write}; - -use crate::util::hash::to_hex; -use crate::util::log; +use std::io::Read; /// Wrap a Read so that we store a copy of what was read. /// Used for re-trying reads when we don't know what to expect from the stream. diff --git a/stacks-common/src/util/secp256k1.rs b/stacks-common/src/util/secp256k1.rs index 5c64838855..353c58c428 100644 --- a/stacks-common/src/util/secp256k1.rs +++ b/stacks-common/src/util/secp256k1.rs @@ -13,7 +13,7 @@ // // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
-use rand::{thread_rng, RngCore}; +use rand::RngCore; use secp256k1; use secp256k1::ecdsa::{ RecoverableSignature as LibSecp256k1RecoverableSignature, RecoveryId as LibSecp256k1RecoveryID, @@ -24,11 +24,9 @@ use secp256k1::{ PublicKey as LibSecp256k1PublicKey, Secp256k1, SecretKey as LibSecp256k1PrivateKey, }; use serde::de::{Deserialize, Error as de_Error}; -use serde::ser::Error as ser_Error; use serde::Serialize; use super::hash::Sha256Sum; -use crate::impl_byte_array_message_codec; use crate::types::{PrivateKey, PublicKey}; use crate::util::hash::{hex_bytes, to_hex}; @@ -442,8 +440,8 @@ mod tests { use secp256k1::{PublicKey as LibSecp256k1PublicKey, Secp256k1}; use super::*; + use crate::util::get_epoch_time_ms; use crate::util::hash::hex_bytes; - use crate::util::{get_epoch_time_ms, log}; struct KeyFixture { input: I, diff --git a/stacks-common/src/util/vrf.rs b/stacks-common/src/util/vrf.rs index 0c2b2c3dad..5c7439daf9 100644 --- a/stacks-common/src/util/vrf.rs +++ b/stacks-common/src/util/vrf.rs @@ -22,16 +22,11 @@ use std::fmt::Debug; use std::hash::{Hash, Hasher}; /// This codebase is based on routines defined in the IETF draft for verifiable random functions /// over elliptic curves (https://tools.ietf.org/id/draft-irtf-cfrg-vrf-02.html). 
-use std::ops::Deref; -use std::ops::DerefMut; use std::{error, fmt}; use curve25519_dalek::constants::ED25519_BASEPOINT_POINT; use curve25519_dalek::edwards::{CompressedEdwardsY, EdwardsPoint}; use curve25519_dalek::scalar::Scalar as ed25519_Scalar; -use ed25519_dalek::{ - SecretKey as EdDalekSecretKeyBytes, SigningKey as EdPrivateKey, VerifyingKey as EdPublicKey, -}; use rand; use sha2::{Digest, Sha512}; @@ -535,10 +530,8 @@ impl VRF { #[cfg(test)] mod tests { - use curve25519_dalek::scalar::Scalar as ed25519_Scalar; use rand; use rand::RngCore; - use sha2::Sha512; use super::*; use crate::util::hash::hex_bytes; From 7a15524b4955dc3c79123b29641866d5b299f57c Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Wed, 1 Jan 2025 23:10:32 -0500 Subject: [PATCH 02/82] chore: Move feature-gated import inside function --- stacks-common/src/types/chainstate.rs | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/stacks-common/src/types/chainstate.rs b/stacks-common/src/types/chainstate.rs index e07e37f27a..89b244ae42 100644 --- a/stacks-common/src/types/chainstate.rs +++ b/stacks-common/src/types/chainstate.rs @@ -15,9 +15,6 @@ use crate::util::vrf::{VRFProof, VRF_PROOF_ENCODED_SIZE}; pub type StacksPublicKey = Secp256k1PublicKey; pub type StacksPrivateKey = Secp256k1PrivateKey; -#[cfg(any(test, feature = "testing"))] -use crate::util::hash::DoubleSha256; - /// Hash of a Trie node. This is a SHA2-512/256. 
#[derive(Default)] pub struct TrieHash(pub [u8; 32]); @@ -450,6 +447,8 @@ impl BurnchainHeaderHash { index_root: &TrieHash, noise: u64, ) -> BurnchainHeaderHash { + use crate::util::hash::DoubleSha256; + let mut bytes = vec![]; bytes.extend_from_slice(&block_height.to_be_bytes()); bytes.extend_from_slice(index_root.as_bytes()); From 8d335291c39c51c324cbc0ad47c45f05e3598b23 Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Thu, 2 Jan 2025 15:59:52 -0500 Subject: [PATCH 03/82] fix: Feature flag `slog_json` working again --- stacks-common/src/util/log.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/stacks-common/src/util/log.rs b/stacks-common/src/util/log.rs index 1699aa6f54..77a4950f81 100644 --- a/stacks-common/src/util/log.rs +++ b/stacks-common/src/util/log.rs @@ -190,6 +190,10 @@ impl TermFormat { #[cfg(feature = "slog_json")] fn make_json_logger() -> Logger { + use std::sync::Mutex; + + use slog::FnValue; + let def_keys = o!("file" => FnValue(move |info| { info.file() }), From 10e6c6d639eecd004311d0b699ee272e9fa2c59c Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Fri, 3 Jan 2025 11:49:47 -0500 Subject: [PATCH 04/82] fix(stacks-common): Remove remaining unused imports --- stacks-common/src/types/chainstate.rs | 2 +- stacks-common/src/types/mod.rs | 1 - stacks-common/src/util/mod.rs | 4 ++-- 3 files changed, 3 insertions(+), 4 deletions(-) diff --git a/stacks-common/src/types/chainstate.rs b/stacks-common/src/types/chainstate.rs index 89b244ae42..b7ecae66c4 100644 --- a/stacks-common/src/types/chainstate.rs +++ b/stacks-common/src/types/chainstate.rs @@ -1,4 +1,4 @@ -use std::fmt::{self, Display}; +use std::fmt; use std::io::{Read, Write}; use std::str::FromStr; diff --git a/stacks-common/src/types/mod.rs b/stacks-common/src/types/mod.rs index 297d629254..acb838f9c8 100644 --- a/stacks-common/src/types/mod.rs +++ b/stacks-common/src/types/mod.rs @@ -14,7 +14,6 @@ // You should have received a copy of the GNU General Public License // along with this 
program. If not, see . -use std::cell::LazyCell; use std::cmp::Ordering; use std::fmt; use std::ops::{Deref, DerefMut, Index, IndexMut}; diff --git a/stacks-common/src/util/mod.rs b/stacks-common/src/util/mod.rs index 0e9ff49cca..46158d2f4f 100644 --- a/stacks-common/src/util/mod.rs +++ b/stacks-common/src/util/mod.rs @@ -30,13 +30,13 @@ pub mod vrf; use std::fs::File; use std::io::{BufReader, BufWriter, Write}; -use std::path::{Path, PathBuf}; +use std::path::Path; use std::time::{SystemTime, UNIX_EPOCH}; use std::{error, fmt, thread, time}; /// Given a relative path inside the Cargo workspace, return the absolute path #[cfg(any(test, feature = "testing"))] -pub fn cargo_workspace

(relative_path: P) -> PathBuf +pub fn cargo_workspace

(relative_path: P) -> std::path::PathBuf where P: AsRef, { From a02c9a60f5264ea82f571af150a594a6cdf23161 Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Fri, 3 Jan 2025 17:36:04 -0500 Subject: [PATCH 05/82] chore: Apply Clippy lint `collection_is_never_used` --- stackslib/src/burnchains/tests/burnchain.rs | 13 ------ stackslib/src/burnchains/tests/db.rs | 4 -- stackslib/src/chainstate/coordinator/tests.rs | 33 -------------- .../chainstate/nakamoto/coordinator/tests.rs | 13 +----- .../src/chainstate/nakamoto/tests/mod.rs | 2 - stackslib/src/chainstate/stacks/block.rs | 13 ------ .../src/chainstate/stacks/boot/pox_4_tests.rs | 4 ++ .../src/chainstate/stacks/db/transactions.rs | 3 +- .../src/chainstate/stacks/index/test/trie.rs | 5 --- .../stacks/tests/block_construction.rs | 45 ------------------- .../stacks/tests/chain_histories.rs | 14 ------ stackslib/src/chainstate/stacks/tests/mod.rs | 4 -- .../src/chainstate/stacks/transaction.rs | 3 -- stackslib/src/core/tests/mod.rs | 4 +- stackslib/src/net/atlas/tests.rs | 5 --- stackslib/src/net/tests/download/epoch2x.rs | 5 +-- stackslib/src/net/tests/mempool/mod.rs | 5 +-- stackslib/src/net/tests/mod.rs | 6 +-- stackslib/src/net/tests/relay/nakamoto.rs | 8 ---- stackslib/src/util_lib/strings.rs | 2 +- testnet/stacks-node/src/event_dispatcher.rs | 4 ++ testnet/stacks-node/src/tests/epoch_21.rs | 6 --- .../src/tests/nakamoto_integrations.rs | 7 ++- .../src/tests/neon_integrations.rs | 10 ----- 24 files changed, 21 insertions(+), 197 deletions(-) diff --git a/stackslib/src/burnchains/tests/burnchain.rs b/stackslib/src/burnchains/tests/burnchain.rs index 7f6be5bcf8..278d9b2910 100644 --- a/stackslib/src/burnchains/tests/burnchain.rs +++ b/stackslib/src/burnchains/tests/burnchain.rs @@ -698,19 +698,14 @@ fn test_burn_snapshot_sequence() { initial_reward_start_block: first_block_height, }; - let mut leader_private_keys = vec![]; let mut leader_public_keys = vec![]; let mut leader_bitcoin_public_keys = vec![]; - let mut 
leader_bitcoin_addresses = vec![]; for i in 0..32 { let mut csprng: ThreadRng = thread_rng(); let vrf_privkey = VRFPrivateKey(ed25519_dalek::SigningKey::generate(&mut csprng)); let vrf_pubkey = VRFPublicKey::from_private(&vrf_privkey); - let privkey_hex = vrf_privkey.to_hex(); - leader_private_keys.push(privkey_hex); - let pubkey_hex = vrf_pubkey.to_hex(); leader_public_keys.push(pubkey_hex); @@ -718,12 +713,6 @@ fn test_burn_snapshot_sequence() { let bitcoin_publickey = BitcoinPublicKey::from_private(&bitcoin_privkey); leader_bitcoin_public_keys.push(to_hex(&bitcoin_publickey.to_bytes())); - - leader_bitcoin_addresses.push(BitcoinAddress::from_bytes_legacy( - BitcoinNetworkType::Testnet, - LegacyBitcoinAddressType::PublicKeyHash, - &Hash160::from_data(&bitcoin_publickey.to_bytes()).0, - )); } let mut expected_burn_total: u64 = 0; @@ -732,7 +721,6 @@ fn test_burn_snapshot_sequence() { let mut db = SortitionDB::connect_test(first_block_height, &first_burn_hash).unwrap(); let mut prev_snapshot = BlockSnapshot::initial(first_block_height, &first_burn_hash, first_block_height); - let mut all_stacks_block_hashes = vec![]; for i in 0..32 { let mut block_ops = vec![]; @@ -823,7 +811,6 @@ fn test_burn_snapshot_sequence() { burn_header_hash: burn_block_hash.clone(), }; - all_stacks_block_hashes.push(next_block_commit.block_header_hash.clone()); block_ops.push(BlockstackOperationType::LeaderBlockCommit( next_block_commit, )); diff --git a/stackslib/src/burnchains/tests/db.rs b/stackslib/src/burnchains/tests/db.rs index c8f568b5bf..834a062088 100644 --- a/stackslib/src/burnchains/tests/db.rs +++ b/stackslib/src/burnchains/tests/db.rs @@ -919,8 +919,6 @@ fn test_update_block_descendancy_with_fork() { let mut cmts_genesis = vec![]; let mut cmts_invalid = vec![]; - let mut fork_parent = None; - let mut fork_parent_block_header: Option = None; let mut fork_cmts = vec![]; for i in 0..5 { @@ -954,7 +952,6 @@ fn test_update_block_descendancy_with_fork() { }; 
fork_headers.push(block_header.clone()); - fork_parent_block_header = Some(block_header); } let mut am_id = 0; @@ -1018,7 +1015,6 @@ fn test_update_block_descendancy_with_fork() { fork_cmts.push(fork_cmt.clone()); parent = Some(cmt); - fork_parent = Some(fork_cmt); if i == 0 { am_id = { diff --git a/stackslib/src/chainstate/coordinator/tests.rs b/stackslib/src/chainstate/coordinator/tests.rs index 0863708122..a56d0c6f67 100644 --- a/stackslib/src/chainstate/coordinator/tests.rs +++ b/stackslib/src/chainstate/coordinator/tests.rs @@ -2268,7 +2268,6 @@ fn test_sortition_with_reward_set() { let mut started_first_reward_cycle = false; // process sequential blocks, and their sortitions... let mut stacks_blocks: Vec<(SortitionId, StacksBlock)> = vec![]; - let mut anchor_blocks = vec![]; // split up the vrf keys and committers so that we have some that will be mining "correctly" // and some that will be producing bad outputs @@ -2442,10 +2441,6 @@ fn test_sortition_with_reward_set() { let new_burnchain_tip = burnchain.get_canonical_chain_tip().unwrap(); if b.is_reward_cycle_start(new_burnchain_tip.block_height) { started_first_reward_cycle = true; - // store the anchor block for this sortition for later checking - let ic = sort_db.index_handle_at_tip(); - let bhh = ic.get_last_anchor_block_hash().unwrap().unwrap(); - anchor_blocks.push(bhh); } let tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap(); @@ -2540,7 +2535,6 @@ fn test_sortition_with_burner_reward_set() { let mut started_first_reward_cycle = false; // process sequential blocks, and their sortitions... 
let mut stacks_blocks: Vec<(SortitionId, StacksBlock)> = vec![]; - let mut anchor_blocks = vec![]; // split up the vrf keys and committers so that we have some that will be mining "correctly" // and some that will be producing bad outputs @@ -2688,10 +2682,6 @@ fn test_sortition_with_burner_reward_set() { let new_burnchain_tip = burnchain.get_canonical_chain_tip().unwrap(); if b.is_reward_cycle_start(new_burnchain_tip.block_height) { started_first_reward_cycle = true; - // store the anchor block for this sortition for later checking - let ic = sort_db.index_handle_at_tip(); - let bhh = ic.get_last_anchor_block_hash().unwrap().unwrap(); - anchor_blocks.push(bhh); } let tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap(); @@ -2804,7 +2794,6 @@ fn test_pox_btc_ops() { let mut started_first_reward_cycle = false; // process sequential blocks, and their sortitions... let mut stacks_blocks: Vec<(SortitionId, StacksBlock)> = vec![]; - let mut anchor_blocks = vec![]; // track the reward set consumption let mut reward_cycle_count = 0; @@ -2972,10 +2961,6 @@ fn test_pox_btc_ops() { if b.is_reward_cycle_start(new_burnchain_tip.block_height) { if new_burnchain_tip.block_height < sunset_ht { started_first_reward_cycle = true; - // store the anchor block for this sortition for later checking - let ic = sort_db.index_handle_at_tip(); - let bhh = ic.get_last_anchor_block_hash().unwrap().unwrap(); - anchor_blocks.push(bhh); } else { // store the anchor block for this sortition for later checking let ic = sort_db.index_handle_at_tip(); @@ -3096,7 +3081,6 @@ fn test_stx_transfer_btc_ops() { let mut started_first_reward_cycle = false; // process sequential blocks, and their sortitions... 
let mut stacks_blocks: Vec<(SortitionId, StacksBlock)> = vec![]; - let mut anchor_blocks = vec![]; // track the reward set consumption let mut reward_recipients = HashSet::new(); @@ -3319,10 +3303,6 @@ fn test_stx_transfer_btc_ops() { if b.is_reward_cycle_start(new_burnchain_tip.block_height) { if new_burnchain_tip.block_height < sunset_ht { started_first_reward_cycle = true; - // store the anchor block for this sortition for later checking - let ic = sort_db.index_handle_at_tip(); - let bhh = ic.get_last_anchor_block_hash().unwrap().unwrap(); - anchor_blocks.push(bhh); } else { // store the anchor block for this sortition for later checking let ic = sort_db.index_handle_at_tip(); @@ -5303,7 +5283,6 @@ fn test_sortition_with_sunset() { let mut started_first_reward_cycle = false; // process sequential blocks, and their sortitions... let mut stacks_blocks: Vec<(SortitionId, StacksBlock)> = vec![]; - let mut anchor_blocks = vec![]; // split up the vrf keys and committers so that we have some that will be mining "correctly" // and some that will be producing bad outputs @@ -5487,10 +5466,6 @@ fn test_sortition_with_sunset() { if b.is_reward_cycle_start(new_burnchain_tip.block_height) { if new_burnchain_tip.block_height < sunset_ht { started_first_reward_cycle = true; - // store the anchor block for this sortition for later checking - let ic = sort_db.index_handle_at_tip(); - let bhh = ic.get_last_anchor_block_hash().unwrap().unwrap(); - anchor_blocks.push(bhh); } else { // store the anchor block for this sortition for later checking let ic = sort_db.index_handle_at_tip(); @@ -5616,7 +5591,6 @@ fn test_sortition_with_sunset_and_epoch_switch() { let mut started_first_reward_cycle = false; // process sequential blocks, and their sortitions... 
let mut stacks_blocks: Vec<(SortitionId, StacksBlock)> = vec![]; - let mut anchor_blocks = vec![]; // split up the vrf keys and committers so that we have some that will be mining "correctly" // and some that will be producing bad outputs @@ -5828,10 +5802,6 @@ fn test_sortition_with_sunset_and_epoch_switch() { if b.is_reward_cycle_start(new_burnchain_tip.block_height) { if new_burnchain_tip.block_height < sunset_ht { started_first_reward_cycle = true; - // store the anchor block for this sortition for later checking - let ic = sort_db.index_handle_at_tip(); - let bhh = ic.get_last_anchor_block_hash().unwrap().unwrap(); - anchor_blocks.push(bhh); } else { // store the anchor block for this sortition for later checking let ic = sort_db.index_handle_at_tip(); @@ -6479,7 +6449,6 @@ fn test_pox_fork_out_of_order() { let mut sortition_ids_diverged = false; // process sequential blocks, and their sortitions... let mut stacks_blocks: Vec<(SortitionId, StacksBlock)> = vec![]; - let mut anchor_blocks = vec![]; // setup: // 2 forks: 0 - 1 - 2 - 3 - 4 - 5 - 11 - 12 - 13 - 14 - 15 @@ -6560,8 +6529,6 @@ fn test_pox_fork_out_of_order() { .unwrap() .block_height ); - - anchor_blocks.push(bhh); } let tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap(); diff --git a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs index e0b3375452..b8c93d427f 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs @@ -112,6 +112,8 @@ fn advance_to_nakamoto( let default_pox_addr = PoxAddress::from_legacy(AddressHashMode::SerializeP2PKH, addr.bytes.clone()); + // Stores the result of a function with side effects, so have Clippy ignore it + #[allow(clippy::collection_is_never_read)] let mut tip = None; for sortition_height in 0..11 { // stack to pox-3 in cycle 7 @@ -347,9 +349,6 @@ fn replay_reward_cycle( .step_by(reward_cycle_length) .collect(); 
- let mut indexes: Vec<_> = (0..stacks_blocks.len()).collect(); - indexes.shuffle(&mut thread_rng()); - for burn_ops in burn_ops.iter() { let (_, _, consensus_hash) = peer.next_burnchain_block(burn_ops.clone()); } @@ -845,7 +844,6 @@ fn block_descendant() { boot_plan.pox_constants = pox_constants; let mut peer = boot_plan.boot_into_nakamoto_peer(vec![], None); - let mut blocks = vec![]; let pox_constants = peer.sortdb().pox_constants.clone(); let first_burn_height = peer.sortdb().first_block_height; @@ -854,7 +852,6 @@ fn block_descendant() { loop { let (block, burn_height, ..) = peer.single_block_tenure(&private_key, |_| {}, |_| {}, |_| true); - blocks.push(block); if pox_constants.is_in_prepare_phase(first_burn_height, burn_height + 1) { info!("At prepare phase start"; "burn_height" => burn_height); @@ -3206,9 +3203,6 @@ fn test_stacks_on_burnchain_ops() { ); let mut all_blocks: Vec = vec![]; - let mut all_burn_ops = vec![]; - let mut consensus_hashes = vec![]; - let mut fee_counts = vec![]; let stx_miner_key = peer.miner.nakamoto_miner_key(); let mut extra_burn_ops = vec![]; @@ -3406,8 +3400,6 @@ fn test_stacks_on_burnchain_ops() { }) .sum::(); - consensus_hashes.push(consensus_hash); - fee_counts.push(fees); let mut blocks: Vec = blocks_and_sizes .into_iter() .map(|(block, _, _)| block) @@ -3449,7 +3441,6 @@ fn test_stacks_on_burnchain_ops() { ); all_blocks.append(&mut blocks); - all_burn_ops.push(burn_ops); } // check receipts for burn ops diff --git a/stackslib/src/chainstate/nakamoto/tests/mod.rs b/stackslib/src/chainstate/nakamoto/tests/mod.rs index bd415b68b0..4d64a1e4f1 100644 --- a/stackslib/src/chainstate/nakamoto/tests/mod.rs +++ b/stackslib/src/chainstate/nakamoto/tests/mod.rs @@ -2529,8 +2529,6 @@ fn parse_vote_for_aggregate_public_key_invalid() { }; invalid_function_arg_reward_cycle.set_origin_nonce(1); - let mut account_nonces = std::collections::HashMap::new(); - account_nonces.insert(invalid_contract_name.origin_address(), 1); for (i, tx) in 
vec![ invalid_contract_address, invalid_contract_name, diff --git a/stackslib/src/chainstate/stacks/block.rs b/stackslib/src/chainstate/stacks/block.rs index 51c53c94de..fa08e0f06d 100644 --- a/stackslib/src/chainstate/stacks/block.rs +++ b/stackslib/src/chainstate/stacks/block.rs @@ -1146,19 +1146,6 @@ mod test { StacksEpochId::latest(), ); - // remove all coinbases - let mut txs_anchored = vec![]; - - for tx in all_txs.iter() { - match tx.payload { - TransactionPayload::Coinbase(..) => { - continue; - } - _ => {} - } - txs_anchored.push(tx); - } - // make microblocks with 3 transactions each (or fewer) for i in 0..(all_txs.len() / 3) { let txs = vec![ diff --git a/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs b/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs index 072f1d33ef..f6c9b7d012 100644 --- a/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs @@ -928,6 +928,8 @@ fn pox_lock_unlock() { assert_eq!(burnchain.pox_constants.reward_slots(), 6); let mut coinbase_nonce = 0; + // Stores the result of a function with side effects, so have Clippy ignore it + #[allow(clippy::collection_is_never_read)] let mut latest_block = None; // Advance into pox4 @@ -2685,6 +2687,8 @@ fn pox_4_delegate_stack_increase_events() { assert_eq!(burnchain.pox_constants.reward_slots(), 6); let mut coinbase_nonce = 0; + // Stores the result of a function with side effects, so have Clippy ignore it + #[allow(clippy::collection_is_never_read)] let mut latest_block = None; let alice_key = keys.pop().unwrap(); diff --git a/stackslib/src/chainstate/stacks/db/transactions.rs b/stackslib/src/chainstate/stacks/db/transactions.rs index e56624b84f..87e29e3f10 100644 --- a/stackslib/src/chainstate/stacks/db/transactions.rs +++ b/stackslib/src/chainstate/stacks/db/transactions.rs @@ -72,8 +72,9 @@ impl TryFrom for HashableClarityValue { impl std::hash::Hash for HashableClarityValue { fn hash(&self, state: &mut H) { - 
#[allow(clippy::unwrap_used)] + #[allow(clippy::unwrap_used, clippy::collection_is_never_read)] // this unwrap is safe _as long as_ TryFrom was used as a constructor + // Also, this function has side effects, which cause Clippy to wrongly think `bytes` is unused let bytes = self.0.serialize_to_vec().unwrap(); bytes.hash(state); } diff --git a/stackslib/src/chainstate/stacks/index/test/trie.rs b/stackslib/src/chainstate/stacks/index/test/trie.rs index 8625527a16..9a130bf9d7 100644 --- a/stackslib/src/chainstate/stacks/index/test/trie.rs +++ b/stackslib/src/chainstate/stacks/index/test/trie.rs @@ -1245,8 +1245,6 @@ fn trie_cursor_splice_leaf_4() { let (nodes, node_ptrs, hashes) = make_node_path(&mut f, node_id.to_u8(), &path_segments, [31u8; 40].to_vec()); - let mut ptrs = vec![]; - // splice in a node in each path segment for k in 0..5 { let mut path = vec![ @@ -1274,7 +1272,6 @@ fn trie_cursor_splice_leaf_4() { &mut node, ) .unwrap(); - ptrs.push(new_ptr); Trie::update_root_hash(&mut f, &c).unwrap(); @@ -1338,7 +1335,6 @@ fn trie_cursor_splice_leaf_2() { let (nodes, node_ptrs, hashes) = make_node_path(&mut f, node_id.to_u8(), &path_segments, [31u8; 40].to_vec()); - let mut ptrs = vec![]; // splice in a node in each path segment for k in 0..10 { @@ -1363,7 +1359,6 @@ fn trie_cursor_splice_leaf_2() { &mut node, ) .unwrap(); - ptrs.push(new_ptr); Trie::update_root_hash(&mut f, &c).unwrap(); diff --git a/stackslib/src/chainstate/stacks/tests/block_construction.rs b/stackslib/src/chainstate/stacks/tests/block_construction.rs index bcf7611695..4b28f637a5 100644 --- a/stackslib/src/chainstate/stacks/tests/block_construction.rs +++ b/stackslib/src/chainstate/stacks/tests/block_construction.rs @@ -187,7 +187,6 @@ fn test_build_anchored_blocks_stx_transfers_single() { let recipient = StacksAddress::from_string(recipient_addr_str).unwrap(); let mut sender_nonce = 0; - let mut last_block = None; for tenure_id in 0..num_blocks { // send transactions to the mempool let tip = 
SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()) @@ -271,8 +270,6 @@ fn test_build_anchored_blocks_stx_transfers_single() { }, ); - last_block = Some(stacks_block.clone()); - peer.next_burnchain_block(burn_ops.clone()); peer.process_stacks_epoch_at_tip(&stacks_block, µblocks); @@ -324,7 +321,6 @@ fn test_build_anchored_blocks_empty_with_builder_timeout() { let recipient = StacksAddress::from_string(recipient_addr_str).unwrap(); let mut sender_nonce = 0; - let mut last_block = None; for tenure_id in 0..num_blocks { // send transactions to the mempool let tip = SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()) @@ -412,8 +408,6 @@ fn test_build_anchored_blocks_empty_with_builder_timeout() { }, ); - last_block = Some(stacks_block.clone()); - peer.next_burnchain_block(burn_ops.clone()); peer.process_stacks_epoch_at_tip(&stacks_block, µblocks); @@ -462,7 +456,6 @@ fn test_build_anchored_blocks_stx_transfers_multi() { let recipient = StacksAddress::from_string(recipient_addr_str).unwrap(); let mut sender_nonce = 0; - let mut last_block = None; for tenure_id in 0..num_blocks { // send transactions to the mempool let tip = SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()) @@ -574,8 +567,6 @@ fn test_build_anchored_blocks_stx_transfers_multi() { }, ); - last_block = Some(stacks_block.clone()); - peer.next_burnchain_block(burn_ops.clone()); peer.process_stacks_epoch_at_tip(&stacks_block, µblocks); @@ -1375,7 +1366,6 @@ fn test_build_anchored_blocks_skip_too_expensive() { let recipient = StacksAddress::from_string(recipient_addr_str).unwrap(); let mut sender_nonce = 0; - let mut last_block = None; for tenure_id in 0..num_blocks { // send transactions to the mempool let tip = SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()) @@ -1515,8 +1505,6 @@ fn test_build_anchored_blocks_skip_too_expensive() { }, ); - last_block = Some(stacks_block.clone()); - 
peer.next_burnchain_block(burn_ops.clone()); peer.process_stacks_epoch_at_tip(&stacks_block, µblocks); @@ -1794,7 +1782,6 @@ fn test_build_anchored_blocks_multiple_chaintips() { sn.block_height }; - let mut last_block = None; for tenure_id in 0..num_blocks { // send transactions to the mempool let tip = SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()) @@ -1889,8 +1876,6 @@ fn test_build_anchored_blocks_multiple_chaintips() { }, ); - last_block = Some(stacks_block.clone()); - peer.next_burnchain_block(burn_ops.clone()); peer.process_stacks_epoch_at_tip(&stacks_block, µblocks); @@ -1936,7 +1921,6 @@ fn test_build_anchored_blocks_empty_chaintips() { sn.block_height }; - let mut last_block = None; for tenure_id in 0..num_blocks { // send transactions to the mempool let tip = SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()) @@ -2025,8 +2009,6 @@ fn test_build_anchored_blocks_empty_chaintips() { }, ); - last_block = Some(stacks_block.clone()); - peer.next_burnchain_block(burn_ops.clone()); peer.process_stacks_epoch_at_tip(&stacks_block, µblocks); @@ -2079,7 +2061,6 @@ fn test_build_anchored_blocks_too_expensive_transactions() { sn.block_height }; - let mut last_block = None; for tenure_id in 0..num_blocks { // send transactions to the mempool let tip = SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()) @@ -2203,8 +2184,6 @@ fn test_build_anchored_blocks_too_expensive_transactions() { }, ); - last_block = Some(stacks_block.clone()); - peer.next_burnchain_block(burn_ops.clone()); peer.process_stacks_epoch_at_tip(&stacks_block, µblocks); @@ -2235,7 +2214,6 @@ fn test_build_anchored_blocks_invalid() { let mut last_block: Option = None; let mut last_valid_block: Option = None; - let mut last_tip: Option = None; let mut last_parent: Option = None; let mut last_parent_tip: Option = None; @@ -2267,8 +2245,6 @@ fn test_build_anchored_blocks_invalid() { tip = resume_tip.clone().unwrap(); 
} - last_tip = Some(tip.clone()); - let (mut burn_ops, stacks_block, microblocks) = peer.make_tenure(|ref mut miner, ref mut sortdb, ref mut chainstate, vrf_proof, ref parent_opt, ref parent_microblock_header_opt| { let parent_opt = if tenure_id != bad_block_tenure { @@ -2444,7 +2420,6 @@ fn test_build_anchored_blocks_bad_nonces() { sn.block_height }; - let mut last_block = None; for tenure_id in 0..num_blocks { eprintln!("Start tenure {:?}", tenure_id); // send transactions to the mempool @@ -2640,8 +2615,6 @@ fn test_build_anchored_blocks_bad_nonces() { }, ); - last_block = Some(stacks_block.clone()); - peer.next_burnchain_block(burn_ops.clone()); peer.process_stacks_epoch_at_tip(&stacks_block, µblocks); @@ -2699,7 +2672,6 @@ fn test_build_microblock_stream_forks() { let recipient_addr_str = "ST1RFD5Q2QPK3E0F08HG9XDX7SSC7CNRS0QR0SGEV"; let recipient = StacksAddress::from_string(recipient_addr_str).unwrap(); - let mut last_block = None; for tenure_id in 0..num_blocks { // send transactions to the mempool let tip = SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()) @@ -2910,8 +2882,6 @@ fn test_build_microblock_stream_forks() { }, ); - last_block = Some(stacks_block.clone()); - peer.next_burnchain_block(burn_ops.clone()); peer.process_stacks_epoch_at_tip(&stacks_block, µblocks); } @@ -3494,12 +3464,6 @@ fn test_contract_call_across_clarity_versions() { let num_blocks = 10; let mut anchored_sender_nonce = 0; - let mut mblock_privks = vec![]; - for _ in 0..num_blocks { - let mblock_privk = StacksPrivateKey::new(); - mblock_privks.push(mblock_privk); - } - let mut peer = TestPeer::new(peer_config); let chainstate_path = peer.chainstate_path.clone(); @@ -4566,7 +4530,6 @@ fn mempool_incorporate_pox_unlocks() { let recipient_addr_str = "ST1RFD5Q2QPK3E0F08HG9XDX7SSC7CNRS0QR0SGEV"; let recipient = StacksAddress::from_string(recipient_addr_str).unwrap(); - let mut last_block = None; for tenure_id in 0..num_blocks { // send transactions to the 
mempool let tip = SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()) @@ -4719,11 +4682,6 @@ fn mempool_incorporate_pox_unlocks() { let (_, _, consensus_hash) = peer.next_burnchain_block(burn_ops.clone()); peer.process_stacks_epoch_at_tip(&stacks_block, µblocks); - - last_block = Some(StacksBlockHeader::make_index_block_hash( - &consensus_hash, - &stacks_block.block_hash(), - )); } } @@ -4763,7 +4721,6 @@ fn test_fee_order_mismatch_nonce_order() { let recipient = StacksAddress::from_string(recipient_addr_str).unwrap(); let sender_nonce = 0; - let mut last_block = None; // send transactions to the mempool let tip = SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()).unwrap(); @@ -4852,8 +4809,6 @@ fn test_fee_order_mismatch_nonce_order() { }, ); - last_block = Some(stacks_block.clone()); - peer.next_burnchain_block(burn_ops.clone()); peer.process_stacks_epoch_at_tip(&stacks_block, µblocks); diff --git a/stackslib/src/chainstate/stacks/tests/chain_histories.rs b/stackslib/src/chainstate/stacks/tests/chain_histories.rs index b8441a3cbb..763942c684 100644 --- a/stackslib/src/chainstate/stacks/tests/chain_histories.rs +++ b/stackslib/src/chainstate/stacks/tests/chain_histories.rs @@ -281,8 +281,6 @@ where ], ); - let mut sortition_winners = vec![]; - let first_snapshot = SortitionDB::get_first_block_snapshot(burn_node.sortdb.conn()).unwrap(); let mut fork = TestBurnchainFork::new( first_snapshot.block_height, @@ -415,8 +413,6 @@ where chain_tip.anchored_header.as_stacks_epoch2().unwrap(), )); - sortition_winners.push(miner_1.origin_address().unwrap()); - let mut next_miner_trace = TestMinerTracePoint::new(); next_miner_trace.add( miner_1.id, @@ -631,7 +627,6 @@ where &fork_snapshot.consensus_hash, &stacks_block_1.header )); - sortition_winners.push(miner_1.origin_address().unwrap()); next_miner_trace.add( miner_1.id, @@ -653,7 +648,6 @@ where &fork_snapshot.consensus_hash, &stacks_block_2.header )); - 
sortition_winners.push(miner_2.origin_address().unwrap()); next_miner_trace.add( miner_2.id, @@ -735,8 +729,6 @@ where ], ); - let mut sortition_winners = vec![]; - let first_snapshot = SortitionDB::get_first_block_snapshot(burn_node.sortdb.conn()).unwrap(); let mut fork = TestBurnchainFork::new( first_snapshot.block_height, @@ -960,7 +952,6 @@ where &fork_snapshot.consensus_hash, &stacks_block_1.header )); - sortition_winners.push(miner_1.origin_address().unwrap()); } else { test_debug!( "\n\nMiner 2 ({}) won sortition\n", @@ -973,7 +964,6 @@ where &fork_snapshot.consensus_hash, &stacks_block_2.header )); - sortition_winners.push(miner_2.origin_address().unwrap()); } // add both blocks to the miner trace, because in this test runner, there will be _two_ @@ -999,8 +989,6 @@ where test_debug!("\n\nMiner 1 and Miner 2 now separate\n\n"); - let mut sortition_winners_1 = sortition_winners.clone(); - let mut sortition_winners_2 = sortition_winners.clone(); let snapshot_at_fork = { let ic = burn_node.sortdb.index_conn(); let tip = fork.get_tip(&ic); @@ -1244,7 +1232,6 @@ where &fork_snapshot.consensus_hash, &stacks_block_1.header )); - sortition_winners_1.push(miner_1.origin_address().unwrap()); } else { test_debug!( "\n\nMiner 2 ({}) won sortition\n", @@ -1257,7 +1244,6 @@ where &fork_snapshot.consensus_hash, &stacks_block_2.header )); - sortition_winners_2.push(miner_2.origin_address().unwrap()); } // each miner produced a block; just one of them got accepted diff --git a/stackslib/src/chainstate/stacks/tests/mod.rs b/stackslib/src/chainstate/stacks/tests/mod.rs index 9f5dd9c860..358ab3bf71 100644 --- a/stackslib/src/chainstate/stacks/tests/mod.rs +++ b/stackslib/src/chainstate/stacks/tests/mod.rs @@ -839,7 +839,6 @@ pub fn check_mining_reward( block_height: u64, prev_block_rewards: &Vec>, ) -> bool { - let mut block_rewards = HashMap::new(); let mut stream_rewards = HashMap::new(); let mut heights = HashMap::new(); let mut confirmed = HashSet::new(); @@ -849,9 +848,6 
@@ pub fn check_mining_reward( &reward.consensus_hash, &reward.block_hash, ); - if reward.coinbase > 0 { - block_rewards.insert(ibh.clone(), reward.clone()); - } if let MinerPaymentTxFees::Epoch2 { streamed, .. } = &reward.tx_fees { if *streamed > 0 { stream_rewards.insert(ibh.clone(), reward.clone()); diff --git a/stackslib/src/chainstate/stacks/transaction.rs b/stackslib/src/chainstate/stacks/transaction.rs index d813dbcf01..765da5499d 100644 --- a/stackslib/src/chainstate/stacks/transaction.rs +++ b/stackslib/src/chainstate/stacks/transaction.rs @@ -3418,9 +3418,6 @@ mod test { let function_name = ClarityName::try_from("hello-function-name").unwrap(); let function_args = vec![Value::Int(0)]; - let mut contract_name_bytes = vec![contract_name.len() as u8]; - contract_name_bytes.extend_from_slice(contract_name.as_bytes()); - let mut contract_call_bytes = vec![]; address .consensus_serialize(&mut contract_call_bytes) diff --git a/stackslib/src/core/tests/mod.rs b/stackslib/src/core/tests/mod.rs index 4477c93b93..ed62de2b42 100644 --- a/stackslib/src/core/tests/mod.rs +++ b/stackslib/src/core/tests/mod.rs @@ -2692,7 +2692,6 @@ fn test_filter_txs_by_type() { version: 1, bytes: Hash160([0xff; 20]), }; - let mut txs = vec![]; let block_height = 10; let mut total_len = 0; @@ -2756,8 +2755,7 @@ fn test_filter_txs_by_type() { ) .unwrap(); - eprintln!("Added {} {}", i, &txid); - txs.push(tx); + eprintln!("Added {i} {txid}"); } mempool_tx.commit().unwrap(); diff --git a/stackslib/src/net/atlas/tests.rs b/stackslib/src/net/atlas/tests.rs index 8094c77799..d0ecd2fe22 100644 --- a/stackslib/src/net/atlas/tests.rs +++ b/stackslib/src/net/atlas/tests.rs @@ -685,20 +685,15 @@ fn test_downloader_context_attachment_requests() { let peer_url_3 = request_3.get_url().clone(); let request_4 = inventories_requests.pop().unwrap(); let peer_url_4 = request_4.get_url().clone(); - let mut responses = HashMap::new(); let response_1 = new_attachments_inventory_response(vec![(0, vec![1, 1, 
1]), (1, vec![0, 0, 0])]); - responses.insert(peer_url_1.clone(), Some(response_1.clone())); let response_2 = new_attachments_inventory_response(vec![(0, vec![1, 1, 1]), (1, vec![0, 0, 0])]); - responses.insert(peer_url_2.clone(), Some(response_2.clone())); let response_3 = new_attachments_inventory_response(vec![(0, vec![0, 1, 1]), (1, vec![1, 0, 0])]); - responses.insert(peer_url_3.clone(), Some(response_3.clone())); - responses.insert(peer_url_4, None); inventories_results .succeeded diff --git a/stackslib/src/net/tests/download/epoch2x.rs b/stackslib/src/net/tests/download/epoch2x.rs index 9c995f1f32..fe193397ed 100644 --- a/stackslib/src/net/tests/download/epoch2x.rs +++ b/stackslib/src/net/tests/download/epoch2x.rs @@ -477,10 +477,9 @@ where info!("Completed walk round {} step(s)", round); - let mut peer_invs = vec![]; for peer in peers.iter_mut() { - let peer_inv = get_blocks_inventory(peer, 0, num_burn_blocks); - peer_invs.push(peer_inv); + // TODO: Remove if this function has no side effects + let _ = get_blocks_inventory(peer, 0, num_burn_blocks); let availability = get_peer_availability( peer, diff --git a/stackslib/src/net/tests/mempool/mod.rs b/stackslib/src/net/tests/mempool/mod.rs index 436e5a315a..0b3ca27913 100644 --- a/stackslib/src/net/tests/mempool/mod.rs +++ b/stackslib/src/net/tests/mempool/mod.rs @@ -765,7 +765,6 @@ fn test_mempool_sync_2_peers_problematic() { let stacks_tip_bhh = peer_1.network.stacks_tip.block_hash.clone(); // fill peer 1 with lots of transactions - let mut txs = HashMap::new(); let mut peer_1_mempool = peer_1.mempool.take().unwrap(); let mut mempool_tx = peer_1_mempool.tx_begin().unwrap(); for i in 0..num_txs { @@ -792,8 +791,6 @@ fn test_mempool_sync_2_peers_problematic() { let sponsor_nonce = tx.get_sponsor_nonce().unwrap_or(origin_nonce); let tx_fee = tx.get_tx_fee(); - txs.insert(tx.txid(), tx.clone()); - // should succeed MemPoolDB::try_add_tx( &mut mempool_tx, @@ -813,7 +810,7 @@ fn 
test_mempool_sync_2_peers_problematic() { ) .unwrap(); - eprintln!("Added {} {}", i, &txid); + eprintln!("Added {i} {txid}"); } mempool_tx.commit().unwrap(); peer_1.mempool = Some(peer_1_mempool); diff --git a/stackslib/src/net/tests/mod.rs b/stackslib/src/net/tests/mod.rs index 3a07ed006c..0329adc183 100644 --- a/stackslib/src/net/tests/mod.rs +++ b/stackslib/src/net/tests/mod.rs @@ -679,7 +679,6 @@ impl NakamotoBootPlan { let mut all_blocks = vec![]; let mut malleablized_block_ids = HashSet::new(); - let mut consensus_hashes = vec![]; let mut last_tenure_change: Option = None; let mut blocks_since_last_tenure = 0; @@ -761,7 +760,6 @@ impl NakamotoBootPlan { }); peer.refresh_burnchain_view(); - consensus_hashes.push(next_consensus_hash); let blocks: Vec = blocks_and_sizes .into_iter() @@ -862,7 +860,6 @@ impl NakamotoBootPlan { }); peer.refresh_burnchain_view(); - consensus_hashes.push(consensus_hash); let blocks: Vec = blocks_and_sizes .into_iter() .map(|(block, _, _)| block) @@ -958,14 +955,13 @@ impl NakamotoBootPlan { // each transaction was mined in the same order as described in the boot plan, // and it succeeded. - let mut burn_receipts = vec![]; let mut stacks_receipts = vec![]; for receipt in observed_block.receipts.iter() { match &receipt.transaction { TransactionOrigin::Stacks(..) => { stacks_receipts.push(receipt); } - TransactionOrigin::Burn(..) => burn_receipts.push(receipt), + TransactionOrigin::Burn(..) 
=> {} } } diff --git a/stackslib/src/net/tests/relay/nakamoto.rs b/stackslib/src/net/tests/relay/nakamoto.rs index 606f1f3fb2..e26f1a3142 100644 --- a/stackslib/src/net/tests/relay/nakamoto.rs +++ b/stackslib/src/net/tests/relay/nakamoto.rs @@ -212,9 +212,6 @@ impl SeedNode { let test_stackers = peer.config.test_stackers.take().unwrap(); let mut all_blocks: Vec = vec![]; - let mut all_burn_ops = vec![]; - let mut rc_blocks = vec![]; - let mut rc_burn_ops = vec![]; // have the peer mine some blocks for two reward cycles for i in 0..(2 * rc_len) { @@ -330,15 +327,10 @@ impl SeedNode { .burnchain .is_reward_cycle_start(tip.block_height) { - rc_blocks.push(all_blocks.clone()); - rc_burn_ops.push(all_burn_ops.clone()); - - all_burn_ops.clear(); all_blocks.clear(); } all_blocks.append(&mut blocks); - all_burn_ops.push(burn_ops); } peer.config.test_signers = Some(test_signers); diff --git a/stackslib/src/util_lib/strings.rs b/stackslib/src/util_lib/strings.rs index d1fb48c86b..87daf94ace 100644 --- a/stackslib/src/util_lib/strings.rs +++ b/stackslib/src/util_lib/strings.rs @@ -353,7 +353,7 @@ mod test { let mut contract_bytes = vec![s.len() as u8]; contract_bytes.extend_from_slice(contract_str.as_bytes()); - check_codec_and_corruption::(&contract_str, &clarity_bytes); + check_codec_and_corruption::(&contract_str, &contract_bytes); } #[test] diff --git a/testnet/stacks-node/src/event_dispatcher.rs b/testnet/stacks-node/src/event_dispatcher.rs index da1668cdd2..18ee8c1797 100644 --- a/testnet/stacks-node/src/event_dispatcher.rs +++ b/testnet/stacks-node/src/event_dispatcher.rs @@ -2266,6 +2266,8 @@ mod test { let server = Server::http(format!("127.0.0.1:{port}")).unwrap(); thread::spawn(move || { let mut attempt = 0; + // This exists to only keep request from being dropped + #[allow(clippy::collection_is_never_read)] let mut _request_holder = None; while let Ok(request) = server.recv() { attempt += 1; @@ -2331,6 +2333,8 @@ mod test { let server = 
Server::http(format!("127.0.0.1:{port}")).unwrap(); thread::spawn(move || { let mut attempt = 0; + // This exists to only keep request from being dropped + #[allow(clippy::collection_is_never_read)] let mut _request_holder = None; while let Ok(mut request) = server.recv() { attempt += 1; diff --git a/testnet/stacks-node/src/tests/epoch_21.rs b/testnet/stacks-node/src/tests/epoch_21.rs index d50cac0117..eaa1e584d7 100644 --- a/testnet/stacks-node/src/tests/epoch_21.rs +++ b/testnet/stacks-node/src/tests/epoch_21.rs @@ -1045,7 +1045,6 @@ fn transition_adds_get_pox_addr_recipients() { ); let mut spender_sks = vec![]; - let mut spender_addrs = vec![]; let mut initial_balances = vec![]; let mut expected_pox_addrs = HashSet::new(); @@ -1056,7 +1055,6 @@ fn transition_adds_get_pox_addr_recipients() { let spender_addr: PrincipalData = to_addr(&spender_sk).into(); spender_sks.push(spender_sk); - spender_addrs.push(spender_addr.clone()); initial_balances.push(InitialBalance { address: spender_addr.clone(), amount: stacked + 100_000, @@ -1353,8 +1351,6 @@ fn transition_adds_mining_from_segwit() { u32::MAX, ); - let mut spender_sks = vec![]; - let mut spender_addrs = vec![]; let mut initial_balances = vec![]; let stacked = 100_000_000_000 * (core::MICROSTACKS_PER_STACKS as u64); @@ -1363,8 +1359,6 @@ fn transition_adds_mining_from_segwit() { let spender_sk = StacksPrivateKey::new(); let spender_addr: PrincipalData = to_addr(&spender_sk).into(); - spender_sks.push(spender_sk); - spender_addrs.push(spender_addr.clone()); initial_balances.push(InitialBalance { address: spender_addr.clone(), amount: stacked + 100_000, diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 1b84b9c0cd..9e4b07b119 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -2933,6 +2933,8 @@ fn block_proposal_api_endpoint() { let http_origin = 
format!("http://{}", &conf.node.rpc_bind); let path = format!("{http_origin}/v3/block_proposal"); + // Clippy thinks this is unused, but it seems to be holding a lock + #[allow(clippy::collection_is_never_read)] let mut hold_proposal_mutex = Some(test_observer::PROPOSAL_RESPONSES.lock().unwrap()); for (ix, (test_description, block_proposal, expected_http_code, _)) in test_cases.iter().enumerate() @@ -10395,7 +10397,6 @@ fn clarity_cost_spend_down() { .get_stacks_blocks_processed(); // Pause mining so we can add all our transactions to the mempool at once. TEST_MINE_STALL.lock().unwrap().replace(true); - let mut submitted_txs = vec![]; for _nmb_tx in 0..nmb_txs_per_signer { for sender_sk in sender_sks.iter() { let sender_nonce = get_and_increment_nonce(&sender_sk, &mut sender_nonces); @@ -10411,9 +10412,7 @@ fn clarity_cost_spend_down() { &[], ); match submit_tx_fallible(&http_origin, &contract_tx) { - Ok(txid) => { - submitted_txs.push(txid); - } + Ok(_txid) => {} Err(_e) => { // If we fail to submit a tx, we need to make sure we don't // increment the nonce for this sender, so we don't end up diff --git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs index a3ce78eb24..ed5fd9bbaa 100644 --- a/testnet/stacks-node/src/tests/neon_integrations.rs +++ b/testnet/stacks-node/src/tests/neon_integrations.rs @@ -8473,8 +8473,6 @@ fn atlas_stress_integration_test() { panic!(); } - let mut all_zonefiles = vec![]; - // make a _ton_ of name-imports for i in 0..batches { let account_before = get_account(&http_origin, &to_addr(&user_1)); @@ -8486,8 +8484,6 @@ fn atlas_stress_integration_test() { let zonefile_hex = format!("facade00{:04x}{:04x}{:04x}", batch_size * i + j, i, j); let hashed_zonefile = Hash160::from_data(&hex_bytes(&zonefile_hex).unwrap()); - all_zonefiles.push(zonefile_hex.clone()); - let tx_3 = make_contract_call( &user_1, 2 + (batch_size * i + j) as u64, @@ -8675,8 +8671,6 @@ fn 
atlas_stress_integration_test() { let zonefile_hex = format!("facade01{j:04x}"); let hashed_zonefile = Hash160::from_data(&hex_bytes(&zonefile_hex).unwrap()); - all_zonefiles.push(zonefile_hex.clone()); - let tx_6 = make_contract_call( &users[batches * batch_size + j], 1, @@ -8739,8 +8733,6 @@ fn atlas_stress_integration_test() { let zonefile_hex = format!("facade02{j:04x}"); let hashed_zonefile = Hash160::from_data(&hex_bytes(&zonefile_hex).unwrap()); - all_zonefiles.push(zonefile_hex.clone()); - let tx_7 = make_contract_call( &users[batches * batch_size + j], 2, @@ -8802,8 +8794,6 @@ fn atlas_stress_integration_test() { let zonefile_hex = format!("facade03{j:04x}"); let hashed_zonefile = Hash160::from_data(&hex_bytes(&zonefile_hex).unwrap()); - all_zonefiles.push(zonefile_hex.clone()); - let tx_8 = make_contract_call( &users[batches * batch_size + j], 3, From d4ad769aae018b20ce07011b1880523a5f98ac8d Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Wed, 8 Jan 2025 13:20:57 -0600 Subject: [PATCH 06/82] feat: implement dry-run mode for signer --- stacks-signer/src/client/stackerdb.rs | 69 +++++++++++++++++++-------- stacks-signer/src/config.rs | 39 +++++++++++++-- stacks-signer/src/runloop.rs | 56 +++++++++++++++------- stacks-signer/src/v0/signer.rs | 44 +++++++++++++---- 4 files changed, 158 insertions(+), 50 deletions(-) diff --git a/stacks-signer/src/client/stackerdb.rs b/stacks-signer/src/client/stackerdb.rs index 0316976a4c..1da6618711 100644 --- a/stacks-signer/src/client/stackerdb.rs +++ b/stacks-signer/src/client/stackerdb.rs @@ -19,12 +19,13 @@ use clarity::codec::read_next; use hashbrown::HashMap; use libsigner::{MessageSlotID, SignerMessage, SignerSession, StackerDBSession}; use libstackerdb::{StackerDBChunkAckData, StackerDBChunkData}; -use slog::{slog_debug, slog_warn}; +use slog::{slog_debug, slog_info, slog_warn}; use stacks_common::types::chainstate::StacksPrivateKey; -use stacks_common::{debug, warn}; +use stacks_common::util::hash::to_hex; 
+use stacks_common::{debug, info, warn}; use crate::client::{retry_with_exponential_backoff, ClientError}; -use crate::config::SignerConfig; +use crate::config::{SignerConfig, SignerConfigMode}; /// The signer StackerDB slot ID, purposefully wrapped to prevent conflation with SignerID #[derive(Debug, Clone, PartialEq, Eq, Hash, Copy, PartialOrd, Ord)] @@ -36,6 +37,12 @@ impl std::fmt::Display for SignerSlotID { } } +#[derive(Debug)] +enum StackerDBMode { + DryRun, + Normal { signer_slot_id: SignerSlotID }, +} + /// The StackerDB client for communicating with the .signers contract #[derive(Debug)] pub struct StackerDB { @@ -46,32 +53,42 @@ pub struct StackerDB { stacks_private_key: StacksPrivateKey, /// A map of a message ID to last chunk version for each session slot_versions: HashMap>, - /// The signer slot ID -- the index into the signer list for this signer daemon's signing key. - signer_slot_id: SignerSlotID, + /// The running mode of the stackerdb (whether the signer is running in dry-run or + /// normal operation) + mode: StackerDBMode, /// The reward cycle of the connecting signer reward_cycle: u64, } impl From<&SignerConfig> for StackerDB { fn from(config: &SignerConfig) -> Self { + let mode = match config.signer_mode { + SignerConfigMode::DryRun => StackerDBMode::DryRun, + SignerConfigMode::Normal { + ref signer_slot_id, .. 
+ } => StackerDBMode::Normal { + signer_slot_id: *signer_slot_id, + }, + }; + Self::new( &config.node_host, config.stacks_private_key, config.mainnet, config.reward_cycle, - config.signer_slot_id, + mode, ) } } impl StackerDB { - /// Create a new StackerDB client - pub fn new( + /// Create a new StackerDB client running in normal operation + fn new( host: &str, stacks_private_key: StacksPrivateKey, is_mainnet: bool, reward_cycle: u64, - signer_slot_id: SignerSlotID, + signer_mode: StackerDBMode, ) -> Self { let mut signers_message_stackerdb_sessions = HashMap::new(); for msg_id in M::all() { @@ -84,7 +101,7 @@ impl StackerDB { signers_message_stackerdb_sessions, stacks_private_key, slot_versions: HashMap::new(), - signer_slot_id, + mode: signer_mode, reward_cycle, } } @@ -110,18 +127,33 @@ impl StackerDB { msg_id: &M, message_bytes: Vec, ) -> Result { - let slot_id = self.signer_slot_id; + let StackerDBMode::Normal { + signer_slot_id: slot_id, + } = &self.mode + else { + info!( + "Dry-run signer would have sent a stackerdb message"; + "message_id" => ?msg_id, + "message_bytes" => to_hex(&message_bytes) + ); + return Ok(StackerDBChunkAckData { + accepted: true, + reason: None, + metadata: None, + code: None, + }); + }; loop { let mut slot_version = if let Some(versions) = self.slot_versions.get_mut(msg_id) { - if let Some(version) = versions.get(&slot_id) { + if let Some(version) = versions.get(slot_id) { *version } else { - versions.insert(slot_id, 0); + versions.insert(*slot_id, 0); 1 } } else { let mut versions = HashMap::new(); - versions.insert(slot_id, 0); + versions.insert(*slot_id, 0); self.slot_versions.insert(*msg_id, versions); 1 }; @@ -143,7 +175,7 @@ impl StackerDB { if let Some(versions) = self.slot_versions.get_mut(msg_id) { // NOTE: per the above, this is always executed - versions.insert(slot_id, slot_version.saturating_add(1)); + versions.insert(*slot_id, slot_version.saturating_add(1)); } else { return Err(ClientError::NotConnected); } @@ -165,7 
+197,7 @@ impl StackerDB { } if let Some(versions) = self.slot_versions.get_mut(msg_id) { // NOTE: per the above, this is always executed - versions.insert(slot_id, slot_version.saturating_add(1)); + versions.insert(*slot_id, slot_version.saturating_add(1)); } else { return Err(ClientError::NotConnected); } @@ -216,11 +248,6 @@ impl StackerDB { u32::try_from(self.reward_cycle % 2).expect("FATAL: reward cycle % 2 exceeds u32::MAX") } - /// Retrieve the signer slot ID - pub fn get_signer_slot_id(&self) -> SignerSlotID { - self.signer_slot_id - } - /// Get the session corresponding to the given message ID if it exists pub fn get_session_mut(&mut self, msg_id: &M) -> Option<&mut StackerDBSession> { self.signers_message_stackerdb_sessions.get_mut(msg_id) diff --git a/stacks-signer/src/config.rs b/stacks-signer/src/config.rs index c100703fc9..a1ff94ef2f 100644 --- a/stacks-signer/src/config.rs +++ b/stacks-signer/src/config.rs @@ -40,6 +40,7 @@ const BLOCK_PROPOSAL_VALIDATION_TIMEOUT_MS: u64 = 120_000; const DEFAULT_FIRST_PROPOSAL_BURN_BLOCK_TIMING_SECS: u64 = 60; const DEFAULT_TENURE_LAST_BLOCK_PROPOSAL_TIMEOUT_SECS: u64 = 30; const TENURE_IDLE_TIMEOUT_SECS: u64 = 300; +const DEFAULT_DRY_RUN: bool = false; #[derive(thiserror::Error, Debug)] /// An error occurred parsing the provided configuration @@ -106,15 +107,36 @@ impl Network { } } +/// Signer config mode (whether dry-run or real) +#[derive(Debug, Clone)] +pub enum SignerConfigMode { + /// Dry run operation: signer is not actually registered, the signer + /// will not submit stackerdb messages, etc. + DryRun, + /// Normal signer operation: if registered, the signer will submit + /// stackerdb messages, etc. 
+ Normal { + /// The signer ID assigned to this signer (may be different from signer_slot_id) + signer_id: u32, + /// The signer stackerdb slot id (may be different from signer_id) + signer_slot_id: SignerSlotID, + }, +} + +impl std::fmt::Display for SignerConfigMode { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + SignerConfigMode::DryRun => write!(f, "Dry-Run signer"), + SignerConfigMode::Normal { signer_id, .. } => write!(f, "signer #{signer_id}"), + } + } +} + /// The Configuration info needed for an individual signer per reward cycle #[derive(Debug, Clone)] pub struct SignerConfig { /// The reward cycle of the configuration pub reward_cycle: u64, - /// The signer ID assigned to this signer (may be different from signer_slot_id) - pub signer_id: u32, - /// The signer stackerdb slot id (may be different from signer_id) - pub signer_slot_id: SignerSlotID, /// The registered signers for this reward cycle pub signer_entries: SignerEntries, /// The signer slot ids of all signers registered for this reward cycle @@ -141,6 +163,8 @@ pub struct SignerConfig { pub tenure_idle_timeout: Duration, /// The maximum age of a block proposal in seconds that will be processed by the signer pub block_proposal_max_age_secs: u64, + /// The running mode for the signer (dry-run or normal) + pub signer_mode: SignerConfigMode, } /// The parsed configuration for the signer @@ -181,6 +205,8 @@ pub struct GlobalConfig { pub tenure_idle_timeout: Duration, /// The maximum age of a block proposal that will be processed by the signer pub block_proposal_max_age_secs: u64, + /// Is this signer binary going to be running in dry-run mode? + pub dry_run: bool, } /// Internal struct for loading up the config file @@ -220,6 +246,8 @@ struct RawConfigFile { pub tenure_idle_timeout_secs: Option, /// The maximum age of a block proposal (in secs) that will be processed by the signer. 
pub block_proposal_max_age_secs: Option, + /// Is this signer binary going to be running in dry-run mode? + pub dry_run: Option, } impl RawConfigFile { @@ -321,6 +349,8 @@ impl TryFrom for GlobalConfig { .block_proposal_max_age_secs .unwrap_or(DEFAULT_BLOCK_PROPOSAL_MAX_AGE_SECS); + let dry_run = raw_data.dry_run.unwrap_or(DEFAULT_DRY_RUN); + Ok(Self { node_host: raw_data.node_host, endpoint, @@ -338,6 +368,7 @@ impl TryFrom for GlobalConfig { block_proposal_validation_timeout, tenure_idle_timeout, block_proposal_max_age_secs, + dry_run, }) } } diff --git a/stacks-signer/src/runloop.rs b/stacks-signer/src/runloop.rs index 69dc2dd843..f718373c47 100644 --- a/stacks-signer/src/runloop.rs +++ b/stacks-signer/src/runloop.rs @@ -25,7 +25,7 @@ use stacks_common::{debug, error, info, warn}; use crate::chainstate::SortitionsView; use crate::client::{retry_with_exponential_backoff, ClientError, StacksClient}; -use crate::config::{GlobalConfig, SignerConfig}; +use crate::config::{GlobalConfig, SignerConfig, SignerConfigMode}; #[cfg(any(test, feature = "testing"))] use crate::v0::tests::TEST_SKIP_SIGNER_CLEANUP; use crate::Signer as SignerTrait; @@ -39,6 +39,9 @@ pub enum ConfigurationError { /// The stackerdb signer config is not yet updated #[error("The stackerdb config is not yet updated")] StackerDBNotUpdated, + /// The signer binary is configured as dry-run, but is also registered for this cycle + #[error("The signer binary is configured as dry-run, but is also registered for this cycle")] + DryRunStackerIsRegistered, } /// The internal signer state info @@ -258,27 +261,48 @@ impl, T: StacksMessageCodec + Clone + Send + Debug> RunLo warn!("Error while fetching stackerdb slots {reward_cycle}: {e:?}"); e })?; + + let dry_run = self.config.dry_run; let current_addr = self.stacks_client.get_signer_address(); - let Some(signer_slot_id) = signer_slot_ids.get(current_addr) else { - warn!( + let signer_config_mode = if !dry_run { + let Some(signer_slot_id) = 
signer_slot_ids.get(current_addr) else { + warn!( "Signer {current_addr} was not found in stacker db. Must not be registered for this reward cycle {reward_cycle}." ); - return Ok(None); - }; - let Some(signer_id) = signer_entries.signer_addr_to_id.get(current_addr) else { - warn!( - "Signer {current_addr} was found in stacker db but not the reward set for reward cycle {reward_cycle}." + return Ok(None); + }; + let Some(signer_id) = signer_entries.signer_addr_to_id.get(current_addr) else { + warn!( + "Signer {current_addr} was found in stacker db but not the reward set for reward cycle {reward_cycle}." + ); + return Ok(None); + }; + info!( + "Signer #{signer_id} ({current_addr}) is registered for reward cycle {reward_cycle}." ); - return Ok(None); + SignerConfigMode::Normal { + signer_slot_id: *signer_slot_id, + signer_id: *signer_id, + } + } else { + if signer_slot_ids.contains_key(current_addr) { + error!( + "Signer is configured for dry-run, but the signer address {current_addr} was found in stacker db." + ); + return Err(ConfigurationError::DryRunStackerIsRegistered); + }; + if signer_entries.signer_addr_to_id.contains_key(current_addr) { + warn!( + "Signer {current_addr} was found in stacker db but not the reward set for reward cycle {reward_cycle}." + ); + return Ok(None); + }; + SignerConfigMode::DryRun }; - info!( - "Signer #{signer_id} ({current_addr}) is registered for reward cycle {reward_cycle}." 
- ); Ok(Some(SignerConfig { reward_cycle, - signer_id: *signer_id, - signer_slot_id: *signer_slot_id, + signer_mode: signer_config_mode, signer_entries, signer_slot_ids: signer_slot_ids.into_values().collect(), first_proposal_burn_block_timing: self.config.first_proposal_burn_block_timing, @@ -299,9 +323,9 @@ impl, T: StacksMessageCodec + Clone + Send + Debug> RunLo let reward_index = reward_cycle % 2; let new_signer_config = match self.get_signer_config(reward_cycle) { Ok(Some(new_signer_config)) => { - let signer_id = new_signer_config.signer_id; + let signer_mode = new_signer_config.signer_mode.clone(); let new_signer = Signer::new(new_signer_config); - info!("{new_signer} Signer is registered for reward cycle {reward_cycle} as signer #{signer_id}. Initialized signer state."); + info!("{new_signer} Signer is registered for reward cycle {reward_cycle} as {signer_mode}. Initialized signer state."); ConfiguredSigner::RegisteredSigner(new_signer) } Ok(None) => { diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index fb52394771..f7b7db084c 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -39,11 +39,25 @@ use stacks_common::{debug, error, info, warn}; use crate::chainstate::{ProposalEvalConfig, SortitionsView}; use crate::client::{SignerSlotID, StackerDB, StacksClient}; -use crate::config::SignerConfig; +use crate::config::{SignerConfig, SignerConfigMode}; use crate::runloop::SignerResult; use crate::signerdb::{BlockInfo, BlockState, SignerDb}; use crate::Signer as SignerTrait; +/// Signer running mode (whether dry-run or real) +#[derive(Debug)] +pub enum SignerMode { + /// Dry run operation: signer is not actually registered, the signer + /// will not submit stackerdb messages, etc. + DryRun, + /// Normal signer operation: if registered, the signer will submit + /// stackerdb messages, etc. 
+ Normal { + /// The signer ID assigned to this signer (may be different from signer_slot_id) + signer_id: u32, + }, +} + /// The stacks signer registered for the reward cycle #[derive(Debug)] pub struct Signer { @@ -57,8 +71,8 @@ pub struct Signer { pub stackerdb: StackerDB, /// Whether the signer is a mainnet signer or not pub mainnet: bool, - /// The signer id - pub signer_id: u32, + /// The running mode of the signer (whether dry-run or normal) + pub mode: SignerMode, /// The signer slot ids for the signers in the reward cycle pub signer_slot_ids: Vec, /// The addresses of other signers @@ -80,9 +94,18 @@ pub struct Signer { pub block_proposal_max_age_secs: u64, } +impl std::fmt::Display for SignerMode { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + SignerMode::DryRun => write!(f, "Dry-Run signer"), + SignerMode::Normal { signer_id } => write!(f, "Signer #{signer_id}"), + } + } +} + impl std::fmt::Display for Signer { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "Cycle #{} Signer #{}", self.reward_cycle, self.signer_id,) + write!(f, "Cycle #{} {}", self.reward_cycle, self.mode) } } @@ -275,10 +298,13 @@ impl SignerTrait for Signer { impl From for Signer { fn from(signer_config: SignerConfig) -> Self { let stackerdb = StackerDB::from(&signer_config); - debug!( - "Reward cycle #{} Signer #{}", - signer_config.reward_cycle, signer_config.signer_id, - ); + let mode = match signer_config.signer_mode { + SignerConfigMode::DryRun => SignerMode::DryRun, + SignerConfigMode::Normal { signer_id, .. 
} => SignerMode::Normal { signer_id }, + }; + + debug!("Reward cycle #{} {mode}", signer_config.reward_cycle); + let signer_db = SignerDb::new(&signer_config.db_path).expect("Failed to connect to signer Db"); let proposal_config = ProposalEvalConfig::from(&signer_config); @@ -287,7 +313,7 @@ impl From for Signer { private_key: signer_config.stacks_private_key, stackerdb, mainnet: signer_config.mainnet, - signer_id: signer_config.signer_id, + mode, signer_addresses: signer_config.signer_entries.signer_addresses.clone(), signer_weights: signer_config.signer_entries.signer_addr_to_weight.clone(), signer_slot_ids: signer_config.signer_slot_ids.clone(), From 57d796b4d1ae5f3652787fb992c5f453093e63d2 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Tue, 14 Jan 2025 13:01:53 -0600 Subject: [PATCH 07/82] fix test build --- stacks-signer/src/client/mod.rs | 8 +++++--- stacks-signer/src/client/stackerdb.rs | 20 +++++++++++++++++++- testnet/stacks-node/src/tests/signer/mod.rs | 20 +++----------------- testnet/stacks-node/src/tests/signer/v0.rs | 8 ++++---- 4 files changed, 31 insertions(+), 25 deletions(-) diff --git a/stacks-signer/src/client/mod.rs b/stacks-signer/src/client/mod.rs index bdaa368567..3c549f4706 100644 --- a/stacks-signer/src/client/mod.rs +++ b/stacks-signer/src/client/mod.rs @@ -144,7 +144,7 @@ pub(crate) mod tests { use stacks_common::util::hash::{Hash160, Sha256Sum}; use super::*; - use crate::config::{GlobalConfig, SignerConfig}; + use crate::config::{GlobalConfig, SignerConfig, SignerConfigMode}; pub struct MockServerClient { pub server: TcpListener, @@ -393,8 +393,10 @@ pub(crate) mod tests { } SignerConfig { reward_cycle, - signer_id: 0, - signer_slot_id: SignerSlotID(rand::thread_rng().gen_range(0..num_signers)), // Give a random signer slot id between 0 and num_signers + signer_mode: SignerConfigMode::Normal { + signer_id: 0, + signer_slot_id: SignerSlotID(rand::thread_rng().gen_range(0..num_signers)), // Give a random signer slot id between 0 
and num_signers + }, signer_entries: SignerEntries { signer_addr_to_id, signer_id_to_pk, diff --git a/stacks-signer/src/client/stackerdb.rs b/stacks-signer/src/client/stackerdb.rs index 1da6618711..bfd7697245 100644 --- a/stacks-signer/src/client/stackerdb.rs +++ b/stacks-signer/src/client/stackerdb.rs @@ -82,7 +82,25 @@ impl From<&SignerConfig> for StackerDB { } impl StackerDB { - /// Create a new StackerDB client running in normal operation + #[cfg(any(test, feature = "testing"))] + /// Create a StackerDB client in normal operation (i.e., not a dry-run signer) + pub fn new_normal( + host: &str, + stacks_private_key: StacksPrivateKey, + is_mainnet: bool, + reward_cycle: u64, + signer_slot_id: SignerSlotID, + ) -> Self { + Self::new( + host, + stacks_private_key, + is_mainnet, + reward_cycle, + StackerDBMode::Normal { signer_slot_id }, + ) + } + + /// Create a new StackerDB client fn new( host: &str, stacks_private_key: StacksPrivateKey, diff --git a/testnet/stacks-node/src/tests/signer/mod.rs b/testnet/stacks-node/src/tests/signer/mod.rs index ebb0990411..c7dc9f2a1b 100644 --- a/testnet/stacks-node/src/tests/signer/mod.rs +++ b/testnet/stacks-node/src/tests/signer/mod.rs @@ -1,4 +1,4 @@ -// Copyright (C) 2020-2024 Stacks Open Internet Foundation +// Copyright (C) 2020-2025 Stacks Open Internet Foundation // // This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by @@ -15,20 +15,6 @@ mod v0; use std::collections::HashSet; -// Copyright (C) 2020-2024 Stacks Open Internet Foundation -// -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. 
-// -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . use std::sync::atomic::{AtomicBool, AtomicU64, Ordering}; use std::sync::{Arc, Mutex}; use std::thread; @@ -696,7 +682,7 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest BlockResponse { - let mut stackerdb = StackerDB::new( + let mut stackerdb = StackerDB::new_normal( &self.running_nodes.conf.node.rpc_bind, StacksPrivateKey::new(), // We are just reading so don't care what the key is false, @@ -764,7 +750,7 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest Date: Tue, 21 Jan 2025 09:25:30 -0600 Subject: [PATCH 08/82] add changelog entry --- CHANGELOG.md | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8e2fc5c172..50648d1b79 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,15 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to the versioning scheme outlined in the [README.md](README.md). +## [Unreleased] + +### Added +- Add `dry_run` configuration option to `stacks-signer` config toml. Dry run mode will + run the signer binary as if it were a registered signer. Instead of broadcasting + `StackerDB` messages, it logs `INFO` messages. Other interactions with the `stacks-node` + behave normally (e.g., submitting validation requests, submitting finished blocks). A + dry run signer will error out if the supplied key is actually a registered signer. 
+ ## [3.1.0.0.3] ### Added @@ -15,7 +24,7 @@ and this project adheres to the versioning scheme outlined in the [README.md](RE ### Changed - The RPC endpoint `/v3/block_proposal` no longer will evaluate block proposals more than `block_proposal_max_age_secs` old -- When a transaction is dropped due to replace-by-fee, the `/drop_mempool_tx` event observer payload now includes `new_txid`, which is the transaction that replaced this dropped transaction. When a transaction is dropped for other reasons, `new_txid` is `null`. [#5381](https://github.com/stacks-network/stacks-core/pull/5381) +- When a transaction is dropped due to replace-by-fee, the `/drop_mempool_tx` event observer payload now includes `new_txid`, which is the transaction that replaced this dropped transaction. When a transaction is dropped for other reasons, `new_txid` is `null`. [#5381](https://github.com/stacks-network/stacks-core/pull/5381) - Nodes will assume that all PoX anchor blocks exist by default, and stall initial block download indefinitely to await their arrival (#5502) ### Fixed From ea771caa4f8ef04ea2bee95f7b8f46e7d1625437 Mon Sep 17 00:00:00 2001 From: Roberto De Ioris Date: Wed, 22 Jan 2025 16:22:26 +0100 Subject: [PATCH 09/82] added heuristic for block rejections timeout --- .../src/nakamoto_node/signer_coordinator.rs | 43 +++++++++++++----- .../src/nakamoto_node/stackerdb_listener.rs | 45 +++++++++---------- testnet/stacks-node/src/tests/signer/v0.rs | 45 +++++++++++++++++++ 3 files changed, 98 insertions(+), 35 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs index f3df78c66b..d409e73445 100644 --- a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs @@ -16,6 +16,7 @@ use std::sync::atomic::AtomicBool; use std::sync::{Arc, Mutex}; use std::thread::JoinHandle; +use std::time::Instant; use
libsigner::v0::messages::{MinerSlotID, SignerMessage as SignerMessageV0}; use libsigner::{BlockProposal, SignerSession, StackerDBSession}; @@ -37,7 +38,9 @@ use stacks::util_lib::boot::boot_code_id; use super::stackerdb_listener::StackerDBListenerComms; use super::Error as NakamotoNodeError; use crate::event_dispatcher::StackerDBChannel; -use crate::nakamoto_node::stackerdb_listener::{StackerDBListener, EVENT_RECEIVER_POLL}; +use crate::nakamoto_node::stackerdb_listener::{ + BlockStatus, StackerDBListener, EVENT_RECEIVER_POLL, +}; use crate::neon::Counters; use crate::Config; @@ -270,17 +273,19 @@ impl SignerCoordinator { burn_tip: &BlockSnapshot, counters: &Counters, ) -> Result, NakamotoNodeError> { + let mut rejections_timer = Instant::now(); + let mut rejections: u32 = 0; + let mut rejections_timeout = core::time::Duration::from_secs(600); + let mut block_status_tracker = BlockStatus::default(); loop { + /// + /// TODO: describe the logic let block_status = match self.stackerdb_comms.wait_for_block_status( block_signer_sighash, + &mut block_status_tracker, + rejections_timer, + rejections_timeout, EVENT_RECEIVER_POLL, - |status| { - status.total_weight_signed < self.weight_threshold - && status - .total_reject_weight - .saturating_add(self.weight_threshold) - <= self.total_weight - }, )? 
{ Some(status) => status, None => { @@ -313,10 +318,26 @@ impl SignerCoordinator { return Err(NakamotoNodeError::BurnchainTipChanged); } + if rejections_timer.elapsed() > rejections_timeout { + return Err(NakamotoNodeError::SigningCoordinatorFailure( + "Gave up while tried reaching the threshold".into(), + )); + } + continue; } }; + if rejections != block_status.total_reject_weight { + rejections_timer = Instant::now(); + rejections = block_status.total_reject_weight; + rejections_timeout = core::time::Duration::from_secs_f32( + 600 as f32 + - (600 as f32 + * ((rejections as f32 / self.weight_threshold as f32).powf(2.0))), + ); + } + if block_status .total_reject_weight .saturating_add(self.weight_threshold) @@ -334,10 +355,12 @@ impl SignerCoordinator { "block_signer_sighash" => %block_signer_sighash, ); return Ok(block_status.gathered_signatures.values().cloned().collect()); - } else { + } else if rejections_timer.elapsed() > rejections_timeout { return Err(NakamotoNodeError::SigningCoordinatorFailure( - "Unblocked without reaching the threshold".into(), + "Gave up while tried reaching the threshold".into(), )); + } else { + continue; } } } diff --git a/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs b/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs index 834c59fa95..92688c0075 100644 --- a/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs +++ b/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs @@ -50,7 +50,7 @@ pub static TEST_IGNORE_SIGNERS: LazyLock> = LazyLock::new(TestFla /// waking up to check timeouts? 
pub static EVENT_RECEIVER_POLL: Duration = Duration::from_millis(500); -#[derive(Debug, Clone)] +#[derive(Debug, Clone, Default, PartialEq)] pub struct BlockStatus { pub responded_signers: HashSet, pub gathered_signatures: BTreeMap, @@ -337,10 +337,8 @@ impl StackerDBListener { block.gathered_signatures.insert(slot_id, signature); block.responded_signers.insert(signer_pubkey); - if block.total_weight_signed >= self.weight_threshold { - // Signal to anyone waiting on this block that we have enough signatures - cvar.notify_all(); - } + // Signal to anyone waiting on this block that we have a new status + cvar.notify_all(); // Update the idle timestamp for this signer self.update_idle_timestamp( @@ -378,6 +376,7 @@ impl StackerDBListener { } }; block.responded_signers.insert(rejected_pubkey); + block.total_reject_weight = block .total_reject_weight .checked_add(signer_entry.weight) @@ -396,14 +395,8 @@ impl StackerDBListener { "server_version" => rejected_data.metadata.server_version, ); - if block - .total_reject_weight - .saturating_add(self.weight_threshold) - > self.total_weight - { - // Signal to anyone waiting on this block that we have enough rejections - cvar.notify_all(); - } + // Signal to anyone waiting on this block that we have a new status + cvar.notify_all(); // Update the idle timestamp for this signer self.update_idle_timestamp( @@ -487,30 +480,32 @@ impl StackerDBListenerComms { /// Get the status for `block` from the Stacker DB listener. /// If the block is not found in the map, return an error. - /// If the block is found, call `condition` to check if the block status - /// satisfies the condition. - /// If the condition is satisfied, return the block status as - /// `Ok(Some(status))`. - /// If the condition is not satisfied, wait for it to be satisfied. + /// If the block is found, return it. /// If the timeout is reached, return `Ok(None)`. 
- pub fn wait_for_block_status( + pub fn wait_for_block_status( &self, block_signer_sighash: &Sha512Trunc256Sum, + block_status_tracker: &mut BlockStatus, + rejections_timer: std::time::Instant, + rejections_timeout: Duration, timeout: Duration, - condition: F, - ) -> Result, NakamotoNodeError> - where - F: Fn(&BlockStatus) -> bool, - { + ) -> Result, NakamotoNodeError> { let (lock, cvar) = &*self.blocks; let blocks = lock.lock().expect("FATAL: failed to lock block status"); let (guard, timeout_result) = cvar .wait_timeout_while(blocks, timeout, |map| { + if rejections_timer.elapsed() > rejections_timeout { + return true; + } let Some(status) = map.get(block_signer_sighash) else { return true; }; - condition(status) + if status != block_status_tracker { + *block_status_tracker = status.clone(); + return false; + } + return true; }) .expect("FATAL: failed to wait on block status cond var"); diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 190145279f..5b92bd47f7 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -7733,6 +7733,51 @@ fn block_validation_response_timeout() { ); } +// Ensures that a signer that successfully submits a block to the node for validation +// will issue ConnectivityIssues rejections if a block submission times out. +// Also ensures that no other proposal gets submitted for validation if we +// are already waiting for a block submission response. 
+#[test] +#[ignore] +fn block_validation_check_rejection_timeout_heuristic() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + info!("------------------------- Test Setup -------------------------"); + let num_signers = 5; + let timeout = Duration::from_secs(30); + let sender_sk = Secp256k1PrivateKey::new(); + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 100; + let send_fee = 180; + + let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( + num_signers, + vec![(sender_addr, send_amt + send_fee)], + |config| { + config.block_proposal_validation_timeout = timeout; + }, + |_| {}, + None, + None, + ); + + let all_signers: Vec<_> = signer_test + .signer_stacks_private_keys + .iter() + .map(StacksPublicKey::from_private) + .collect(); + + signer_test.boot_to_epoch_3(); + + TEST_REJECT_ALL_BLOCK_PROPOSAL.set(vec![all_signers[3], all_signers[4]]); + TEST_IGNORE_ALL_BLOCK_PROPOSALS.set(vec![all_signers[0], all_signers[1], all_signers[2]]); + + info!("------------------------- Test Mine and Verify Confirmed Nakamoto Block -------------------------"); + signer_test.mine_and_verify_confirmed_naka_block(timeout, num_signers, true); +} + /// Test scenario: /// /// - when a signer submits a block validation request and From 6ab756b201a064fcbf3a0bb824a605187e34e894 Mon Sep 17 00:00:00 2001 From: Roberto De Ioris Date: Wed, 22 Jan 2025 16:33:14 +0100 Subject: [PATCH 10/82] added rejections_timeout test exposure via BLOCK_REJECTIONS_CURRENT_TIMEOUT --- .../src/nakamoto_node/signer_coordinator.rs | 28 +++++++++++++++++-- 1 file changed, 25 insertions(+), 3 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs index 146e767b24..f018852e9e 100644 --- a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs @@ -44,6 +44,24 @@ use 
crate::nakamoto_node::stackerdb_listener::{ use crate::neon::Counters; use crate::Config; +#[cfg(test)] +use std::time::Duration; + +#[cfg(test)] +use stacks_common::util::tests::TestFlag; + +#[cfg(test)] +use std::sync::LazyLock; + +#[cfg(test)] +/// Test-only value for storing the current rejection based timeout +/// Used to test that the signers will broadcast a block if it gets enough signatures +pub static BLOCK_REJECTIONS_CURRENT_TIMEOUT: LazyLock> = + LazyLock::new(TestFlag::default); + +/// Base timeout for rejections heuristic +pub static BLOCK_REJECTIONS_TIMEOUT_BASE: u64 = 600; + /// The state of the signer database listener, used by the miner thread to /// interact with the signer listener. pub struct SignerCoordinator { @@ -298,11 +316,12 @@ impl SignerCoordinator { ) -> Result, NakamotoNodeError> { let mut rejections_timer = Instant::now(); let mut rejections: u32 = 0; - let mut rejections_timeout = core::time::Duration::from_secs(600); + let mut rejections_timeout = core::time::Duration::from_secs(BLOCK_REJECTIONS_TIMEOUT_BASE); let mut block_status_tracker = BlockStatus::default(); loop { /// /// TODO: describe the logic + /// let block_status = match self.stackerdb_comms.wait_for_block_status( block_signer_sighash, &mut block_status_tracker, @@ -355,10 +374,13 @@ impl SignerCoordinator { rejections_timer = Instant::now(); rejections = block_status.total_reject_weight; rejections_timeout = core::time::Duration::from_secs_f32( - 600 as f32 - - (600 as f32 + BLOCK_REJECTIONS_TIMEOUT_BASE as f32 + - (BLOCK_REJECTIONS_TIMEOUT_BASE as f32 * ((rejections as f32 / self.weight_threshold as f32).powf(2.0))), ); + + #[cfg(test)] + BLOCK_REJECTIONS_CURRENT_TIMEOUT.set(rejections_timeout); } if block_status From bfd47939cce6cab5277c307497fc649e6ed47f53 Mon Sep 17 00:00:00 2001 From: Roberto De Ioris Date: Wed, 22 Jan 2025 16:55:05 +0100 Subject: [PATCH 11/82] improved test logic --- testnet/stacks-node/src/tests/signer/v0.rs | 10 ++++++++-- 1 file changed, 8 
insertions(+), 2 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index c6b2367290..345e9d8e90 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -75,6 +75,7 @@ use crate::event_dispatcher::{MinedNakamotoBlockEvent, TEST_SKIP_BLOCK_ANNOUNCEM use crate::nakamoto_node::miner::{ TEST_BLOCK_ANNOUNCE_STALL, TEST_BROADCAST_STALL, TEST_MINE_STALL, }; +use crate::nakamoto_node::signer_coordinator::BLOCK_REJECTIONS_CURRENT_TIMEOUT; use crate::nakamoto_node::stackerdb_listener::TEST_IGNORE_SIGNERS; use crate::neon::Counters; use crate::run_loop::boot_nakamoto; @@ -7842,8 +7843,13 @@ fn block_validation_check_rejection_timeout_heuristic() { TEST_REJECT_ALL_BLOCK_PROPOSAL.set(vec![all_signers[3], all_signers[4]]); TEST_IGNORE_ALL_BLOCK_PROPOSALS.set(vec![all_signers[0], all_signers[1], all_signers[2]]); - info!("------------------------- Test Mine and Verify Confirmed Nakamoto Block -------------------------"); - signer_test.mine_and_verify_confirmed_naka_block(timeout, num_signers, true); + info!("------------------------- Test Mine and Verify Rejected Nakamoto Block -------------------------"); + signer_test.mine_nakamoto_block(timeout, true); + signer_test + .wait_for_block_rejections(timeout.as_secs(), &[all_signers[3], all_signers[4]]) + .unwrap(); + + assert_eq!(BLOCK_REJECTIONS_CURRENT_TIMEOUT.get().as_secs(), 400); } /// Test scenario: From 3cebb352e2ff379491b08e5b2077be0b4664da8c Mon Sep 17 00:00:00 2001 From: Roberto De Ioris Date: Wed, 22 Jan 2025 17:57:24 +0100 Subject: [PATCH 12/82] fixed integration test --- testnet/stacks-node/src/tests/signer/v0.rs | 46 ++++++++++++++++++++-- 1 file changed, 43 insertions(+), 3 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 345e9d8e90..005c09c034 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ 
b/testnet/stacks-node/src/tests/signer/v0.rs @@ -7840,16 +7840,56 @@ fn block_validation_check_rejection_timeout_heuristic() { signer_test.boot_to_epoch_3(); + // note we just use mined nakamoto_blocks as the second block is not going to be confirmed + + info!("------------------------- Check Rejections-based timeout with 1 rejection -------------------------"); + + let blocks_before = test_observer::get_mined_nakamoto_blocks().len(); + + TEST_REJECT_ALL_BLOCK_PROPOSAL.set(vec![all_signers[4]]); + TEST_IGNORE_ALL_BLOCK_PROPOSALS.set(vec![ + all_signers[0], + all_signers[1], + all_signers[2], + all_signers[3], + ]); + + next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + 30, + || Ok(test_observer::get_mined_nakamoto_blocks().len() > blocks_before), + ) + .unwrap(); + + signer_test + .wait_for_block_rejections(timeout.as_secs(), &[all_signers[4]]) + .unwrap(); + + assert_eq!(BLOCK_REJECTIONS_CURRENT_TIMEOUT.get().as_secs(), 551); + + info!("------------------------- Check Rejections-based timeout with 2 rejections -------------------------"); + + let blocks_before = test_observer::get_mined_nakamoto_blocks().len(); + TEST_REJECT_ALL_BLOCK_PROPOSAL.set(vec![all_signers[3], all_signers[4]]); TEST_IGNORE_ALL_BLOCK_PROPOSALS.set(vec![all_signers[0], all_signers[1], all_signers[2]]); - info!("------------------------- Test Mine and Verify Rejected Nakamoto Block -------------------------"); - signer_test.mine_nakamoto_block(timeout, true); + next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + 30, + || Ok(test_observer::get_mined_nakamoto_blocks().len() > blocks_before), + ) + .unwrap(); + signer_test .wait_for_block_rejections(timeout.as_secs(), &[all_signers[3], all_signers[4]]) .unwrap(); - assert_eq!(BLOCK_REJECTIONS_CURRENT_TIMEOUT.get().as_secs(), 400); + assert_eq!(BLOCK_REJECTIONS_CURRENT_TIMEOUT.get().as_secs(), 404); + + // reset reject/ignore + TEST_REJECT_ALL_BLOCK_PROPOSAL.set(vec![]); + 
TEST_IGNORE_ALL_BLOCK_PROPOSALS.set(vec![]); } /// Test scenario: From c3e163e6171934ec0519352d59189c861354957b Mon Sep 17 00:00:00 2001 From: Roberto De Ioris Date: Wed, 22 Jan 2025 19:17:28 +0100 Subject: [PATCH 13/82] fixed comment for BLOCK_REJECTIONS_CURRENT_TIMEOUT --- testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs index f018852e9e..757d5943d3 100644 --- a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs @@ -55,7 +55,6 @@ use std::sync::LazyLock; #[cfg(test)] /// Test-only value for storing the current rejection based timeout -/// Used to test that the signers will broadcast a block if it gets enough signatures pub static BLOCK_REJECTIONS_CURRENT_TIMEOUT: LazyLock> = LazyLock::new(TestFlag::default); From 6b751c406843ed16709b93cf239fe1dd70f91a1d Mon Sep 17 00:00:00 2001 From: Roberto De Ioris Date: Wed, 22 Jan 2025 19:19:35 +0100 Subject: [PATCH 14/82] fixed test comment --- testnet/stacks-node/src/tests/signer/v0.rs | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 30b1ffbe52..ddde8bd5da 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -7803,10 +7803,8 @@ fn block_validation_response_timeout() { ); } -// Ensures that a signer that successfully submits a block to the node for validation -// will issue ConnectivityIssues rejections if a block submission times out. -// Also ensures that no other proposal gets submitted for validation if we -// are already waiting for a block submission response. +// Verify that the miner timeout while waiting for signers will change accordingly +// to rejections. 
#[test] #[ignore] fn block_validation_check_rejection_timeout_heuristic() { From bb8df99742cf37723d3858bcc581d822f6c41b7a Mon Sep 17 00:00:00 2001 From: Roberto De Ioris Date: Wed, 22 Jan 2025 19:23:19 +0100 Subject: [PATCH 15/82] fmt --- .../src/nakamoto_node/signer_coordinator.rs | 21 ++++++++----------- 1 file changed, 9 insertions(+), 12 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs index 757d5943d3..4aabe006e6 100644 --- a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs @@ -14,8 +14,12 @@ // along with this program. If not, see . use std::sync::atomic::AtomicBool; +#[cfg(test)] +use std::sync::LazyLock; use std::sync::{Arc, Mutex}; use std::thread::JoinHandle; +#[cfg(test)] +use std::time::Duration; use std::time::Instant; use libsigner::v0::messages::{MinerSlotID, SignerMessage as SignerMessageV0}; @@ -34,6 +38,8 @@ use stacks::types::chainstate::{StacksBlockId, StacksPrivateKey}; use stacks::util::hash::Sha512Trunc256Sum; use stacks::util::secp256k1::MessageSignature; use stacks::util_lib::boot::boot_code_id; +#[cfg(test)] +use stacks_common::util::tests::TestFlag; use super::stackerdb_listener::StackerDBListenerComms; use super::Error as NakamotoNodeError; @@ -44,15 +50,6 @@ use crate::nakamoto_node::stackerdb_listener::{ use crate::neon::Counters; use crate::Config; -#[cfg(test)] -use std::time::Duration; - -#[cfg(test)] -use stacks_common::util::tests::TestFlag; - -#[cfg(test)] -use std::sync::LazyLock; - #[cfg(test)] /// Test-only value for storing the current rejection based timeout pub static BLOCK_REJECTIONS_CURRENT_TIMEOUT: LazyLock> = @@ -318,9 +315,9 @@ impl SignerCoordinator { let mut rejections_timeout = core::time::Duration::from_secs(BLOCK_REJECTIONS_TIMEOUT_BASE); let mut block_status_tracker = BlockStatus::default(); loop { - /// - /// TODO: describe the logic - /// 
+ // At every iteration wait for the block_status. + // Exit when the amount of confirmations/rejections reach the threshold (or until timeout) + // Based on the amount of rejections, eventually modify the timeout. let block_status = match self.stackerdb_comms.wait_for_block_status( block_signer_sighash, &mut block_status_tracker, From 280483b7555ae482cfdec75293e0465a1ad5ad9e Mon Sep 17 00:00:00 2001 From: Roberto De Ioris Date: Wed, 22 Jan 2025 19:26:50 +0100 Subject: [PATCH 16/82] added more comments --- testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs index 4aabe006e6..f3044de972 100644 --- a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs @@ -310,13 +310,17 @@ impl SignerCoordinator { sortdb: &SortitionDB, counters: &Counters, ) -> Result, NakamotoNodeError> { + // this is used to track the start of the waiting cycle let mut rejections_timer = Instant::now(); + // the amount of current rejections to eventually modify the timeout let mut rejections: u32 = 0; + // default timeout let mut rejections_timeout = core::time::Duration::from_secs(BLOCK_REJECTIONS_TIMEOUT_BASE); + // this is used for comparing block_status to identify if it has been changed from the previous event let mut block_status_tracker = BlockStatus::default(); loop { // At every iteration wait for the block_status. - // Exit when the amount of confirmations/rejections reach the threshold (or until timeout) + // Exit when the amount of confirmations/rejections reaches the threshold (or until timeout) // Based on the amount of rejections, eventually modify the timeout. 
let block_status = match self.stackerdb_comms.wait_for_block_status( block_signer_sighash, From 9f1c4314ea02701c6b89b83795305519f1450cb4 Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Thu, 23 Jan 2025 09:38:46 -0500 Subject: [PATCH 17/82] chore: Apply PR comments from Brice --- stackslib/src/chainstate/nakamoto/coordinator/tests.rs | 5 +---- stackslib/src/net/tests/download/epoch2x.rs | 9 +++------ testnet/stacks-node/src/tests/nakamoto_integrations.rs | 4 +--- 3 files changed, 5 insertions(+), 13 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs index b8c93d427f..eb6fde3b12 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs @@ -112,9 +112,6 @@ fn advance_to_nakamoto( let default_pox_addr = PoxAddress::from_legacy(AddressHashMode::SerializeP2PKH, addr.bytes.clone()); - // Stores the result of a function with side effects, so have Clippy ignore it - #[allow(clippy::collection_is_never_read)] - let mut tip = None; for sortition_height in 0..11 { // stack to pox-3 in cycle 7 let txs = if sortition_height == 6 { @@ -158,7 +155,7 @@ fn advance_to_nakamoto( vec![] }; - tip = Some(peer.tenure_with_txs(&txs, &mut peer_nonce)); + peer.tenure_with_txs(&txs, &mut peer_nonce); } // peer is at the start of cycle 8 } diff --git a/stackslib/src/net/tests/download/epoch2x.rs b/stackslib/src/net/tests/download/epoch2x.rs index ed255c5271..850be9cb5e 100644 --- a/stackslib/src/net/tests/download/epoch2x.rs +++ b/stackslib/src/net/tests/download/epoch2x.rs @@ -216,10 +216,10 @@ fn test_get_block_availability() { }) } -fn get_blocks_inventory(peer: &mut TestPeer, start_height: u64, end_height: u64) -> BlocksInvData { +fn get_blocks_inventory(peer: &TestPeer, start_height: u64, end_height: u64) -> BlocksInvData { let block_hashes = { let num_headers = end_height - start_height; - let ic = 
peer.sortdb.as_mut().unwrap().index_conn(); + let ic = peer.sortdb.as_ref().unwrap().index_conn(); let tip = SortitionDB::get_canonical_burn_chain_tip(&ic).unwrap(); let ancestor = SortitionDB::get_ancestor_snapshot(&ic, end_height, &tip.sortition_id) .unwrap() @@ -233,7 +233,7 @@ fn get_blocks_inventory(peer: &mut TestPeer, start_height: u64, end_height: u64) }; let inv = peer - .chainstate() + .chainstate_ref() .get_blocks_inventory(&block_hashes) .unwrap(); inv @@ -476,9 +476,6 @@ where info!("Completed walk round {} step(s)", round); for peer in peers.iter_mut() { - // TODO: Remove if this function has no side effects - let _ = get_blocks_inventory(peer, 0, num_burn_blocks); - let availability = get_peer_availability( peer, first_stacks_block_height - first_sortition_height, diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index fb305c72d2..b6c2eb372c 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -3025,8 +3025,6 @@ fn block_proposal_api_endpoint() { let http_origin = format!("http://{}", &conf.node.rpc_bind); let path = format!("{http_origin}/v3/block_proposal"); - // Clippy thinks this is unused, but it seems to be holding a lock - #[allow(clippy::collection_is_never_read)] let mut hold_proposal_mutex = Some(test_observer::PROPOSAL_RESPONSES.lock().unwrap()); for (ix, (test_description, block_proposal, expected_http_code, _)) in test_cases.iter().enumerate() @@ -3084,7 +3082,7 @@ fn block_proposal_api_endpoint() { if ix == 1 { // release the test observer mutex so that the handler from 0 can finish! 
- hold_proposal_mutex.take(); + _ = hold_proposal_mutex.take(); } } From 05b3f92c6c96ff57e4d05c5caded32885037dbf5 Mon Sep 17 00:00:00 2001 From: Roberto De Ioris Date: Thu, 23 Jan 2025 15:51:08 +0100 Subject: [PATCH 18/82] fixed formatting --- .../stacks-node/src/nakamoto_node/signer_coordinator.rs | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs index f3044de972..818f1f6a08 100644 --- a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs @@ -18,9 +18,7 @@ use std::sync::atomic::AtomicBool; use std::sync::LazyLock; use std::sync::{Arc, Mutex}; use std::thread::JoinHandle; -#[cfg(test)] -use std::time::Duration; -use std::time::Instant; +use std::time::{Duration, Instant}; use libsigner::v0::messages::{MinerSlotID, SignerMessage as SignerMessageV0}; use libsigner::{BlockProposal, SignerSession, StackerDBSession}; @@ -315,7 +313,7 @@ impl SignerCoordinator { // the amount of current rejections to eventually modify the timeout let mut rejections: u32 = 0; // default timeout - let mut rejections_timeout = core::time::Duration::from_secs(BLOCK_REJECTIONS_TIMEOUT_BASE); + let mut rejections_timeout = Duration::from_secs(BLOCK_REJECTIONS_TIMEOUT_BASE); // this is used for comparing block_status to identify if it has been changed from the previous event let mut block_status_tracker = BlockStatus::default(); loop { @@ -373,7 +371,7 @@ impl SignerCoordinator { if rejections != block_status.total_reject_weight { rejections_timer = Instant::now(); rejections = block_status.total_reject_weight; - rejections_timeout = core::time::Duration::from_secs_f32( + rejections_timeout = Duration::from_secs_f32( BLOCK_REJECTIONS_TIMEOUT_BASE as f32 - (BLOCK_REJECTIONS_TIMEOUT_BASE as f32 * ((rejections as f32 / self.weight_threshold as f32).powf(2.0))), From 
2f2cb53f66291d8ee46b2837f97bf5b97df82bfa Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Thu, 23 Jan 2025 13:31:06 -0500 Subject: [PATCH 19/82] chore: Apply Clippy lint `redundant_pattern_matching` --- stackslib/src/burnchains/db.rs | 3 ++- stackslib/src/chainstate/nakamoto/mod.rs | 2 +- stackslib/src/chainstate/stacks/boot/contract_tests.rs | 2 +- stackslib/src/chainstate/stacks/db/blocks.rs | 2 +- stackslib/src/chainstate/stacks/db/mod.rs | 5 +---- stackslib/src/chainstate/stacks/index/file.rs | 2 +- stackslib/src/chainstate/stacks/index/test/marf.rs | 2 +- stackslib/src/clarity_cli.rs | 8 ++++---- stackslib/src/clarity_vm/database/mod.rs | 6 ++++-- stackslib/src/main.rs | 2 +- stackslib/src/net/tests/inv/nakamoto.rs | 2 +- 11 files changed, 18 insertions(+), 18 deletions(-) diff --git a/stackslib/src/burnchains/db.rs b/stackslib/src/burnchains/db.rs index 2571c532ed..47af6b3ae6 100644 --- a/stackslib/src/burnchains/db.rs +++ b/stackslib/src/burnchains/db.rs @@ -1193,9 +1193,10 @@ impl BurnchainDB { let ops: Vec = query_rows(&self.conn, qry, args).expect("FATAL: burnchain DB query error"); for op in ops { - if let Some(_) = indexer + if indexer .find_burnchain_header_height(&op.burn_header_hash()) .expect("FATAL: burnchain DB query error") + .is_some() { // this is the op on the canonical fork return Some(op); diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index 7584af67d3..c15c48e3fd 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -2476,7 +2476,7 @@ impl NakamotoChainState { ) -> Result { test_debug!("Consider Nakamoto block {}", &block.block_id()); // do nothing if we already have this block - if let Some(_) = Self::get_block_header(headers_conn, &block.header.block_id())? 
{ + if Self::get_block_header(headers_conn, &block.header.block_id())?.is_some() { debug!("Already have block {}", &block.header.block_id()); return Ok(false); } diff --git a/stackslib/src/chainstate/stacks/boot/contract_tests.rs b/stackslib/src/chainstate/stacks/boot/contract_tests.rs index 2fb95a5ace..11cbc1fd46 100644 --- a/stackslib/src/chainstate/stacks/boot/contract_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/contract_tests.rs @@ -486,7 +486,7 @@ impl BurnStateDB for TestSimBurnStateDB { height: u32, sortition_id: &SortitionId, ) -> Option<(Vec, u128)> { - if let Some(_) = self.get_burn_header_hash(height, sortition_id) { + if self.get_burn_header_hash(height, sortition_id).is_some() { let first_block = self.get_burn_start_height(); let prepare_len = self.get_pox_prepare_length(); let rc_len = self.get_pox_reward_cycle_length(); diff --git a/stackslib/src/chainstate/stacks/db/blocks.rs b/stackslib/src/chainstate/stacks/db/blocks.rs index 30b38c10cc..d3a4dc8a5a 100644 --- a/stackslib/src/chainstate/stacks/db/blocks.rs +++ b/stackslib/src/chainstate/stacks/db/blocks.rs @@ -5147,7 +5147,7 @@ impl StacksChainState { ) { Ok(miner_rewards_opt) => miner_rewards_opt, Err(e) => { - if let Some(_) = miner_id_opt { + if miner_id_opt.is_some() { return Err(e); } else { let msg = format!("Failed to load miner rewards: {:?}", &e); diff --git a/stackslib/src/chainstate/stacks/db/mod.rs b/stackslib/src/chainstate/stacks/db/mod.rs index 6853ec0ee9..c09b2fcbab 100644 --- a/stackslib/src/chainstate/stacks/db/mod.rs +++ b/stackslib/src/chainstate/stacks/db/mod.rs @@ -1839,10 +1839,7 @@ impl StacksChainState { let nakamoto_staging_blocks_conn = StacksChainState::open_nakamoto_staging_blocks(&nakamoto_staging_blocks_path, true)?; - let init_required = match fs::metadata(&clarity_state_index_marf) { - Ok(_) => false, - Err(_) => true, - }; + let init_required = fs::metadata(&clarity_state_index_marf).is_err(); let state_index = StacksChainState::open_db(mainnet, chain_id, 
&header_index_root)?; diff --git a/stackslib/src/chainstate/stacks/index/file.rs b/stackslib/src/chainstate/stacks/index/file.rs index 5a7da69e52..3940cb594e 100644 --- a/stackslib/src/chainstate/stacks/index/file.rs +++ b/stackslib/src/chainstate/stacks/index/file.rs @@ -213,7 +213,7 @@ impl TrieFile { let mut set_sqlite_tmpdir = false; let mut old_tmpdir_opt = None; if let Some(parent_path) = Path::new(db_path).parent() { - if let Err(_) = env::var("SQLITE_TMPDIR") { + if env::var("SQLITE_TMPDIR").is_err() { debug!( "Sqlite will store temporary migration state in '{}'", parent_path.display() diff --git a/stackslib/src/chainstate/stacks/index/test/marf.rs b/stackslib/src/chainstate/stacks/index/test/marf.rs index 63b2b58968..4f2b06a480 100644 --- a/stackslib/src/chainstate/stacks/index/test/marf.rs +++ b/stackslib/src/chainstate/stacks/index/test/marf.rs @@ -2190,7 +2190,7 @@ fn test_marf_begin_from_sentinel_twice() { #[test] fn test_marf_unconfirmed() { let marf_path = "/tmp/test_marf_unconfirmed"; - if let Ok(_) = std::fs::metadata(marf_path) { + if std::fs::metadata(marf_path).is_ok() { std::fs::remove_file(marf_path).unwrap(); } let marf_opts = MARFOpenOpts::default(); diff --git a/stackslib/src/clarity_cli.rs b/stackslib/src/clarity_cli.rs index 9862281b6c..a580e90ee9 100644 --- a/stackslib/src/clarity_cli.rs +++ b/stackslib/src/clarity_cli.rs @@ -645,7 +645,7 @@ impl HeadersDB for CLIHeadersDB { ) -> Option { // mock it let conn = self.conn(); - if let Some(_) = get_cli_block_height(&conn, id_bhh) { + if get_cli_block_height(&conn, id_bhh).is_some() { let hash_bytes = Sha512Trunc256Sum::from_data(&id_bhh.0); Some(BurnchainHeaderHash(hash_bytes.0)) } else { @@ -660,7 +660,7 @@ impl HeadersDB for CLIHeadersDB { ) -> Option { // mock it let conn = self.conn(); - if let Some(_) = get_cli_block_height(&conn, id_bhh) { + if get_cli_block_height(&conn, id_bhh).is_some() { let hash_bytes = Hash160::from_data(&id_bhh.0); Some(ConsensusHash(hash_bytes.0)) } else { @@ 
-674,7 +674,7 @@ impl HeadersDB for CLIHeadersDB { _epoch: &StacksEpochId, ) -> Option { let conn = self.conn(); - if let Some(_) = get_cli_block_height(&conn, id_bhh) { + if get_cli_block_height(&conn, id_bhh).is_some() { // mock it, but make it unique let hash_bytes = Sha512Trunc256Sum::from_data(&id_bhh.0); let hash_bytes_2 = Sha512Trunc256Sum::from_data(&hash_bytes.0); @@ -690,7 +690,7 @@ impl HeadersDB for CLIHeadersDB { _epoch: &StacksEpochId, ) -> Option { let conn = self.conn(); - if let Some(_) = get_cli_block_height(&conn, id_bhh) { + if get_cli_block_height(&conn, id_bhh).is_some() { // mock it, but make it unique let hash_bytes = Sha512Trunc256Sum::from_data(&id_bhh.0); let hash_bytes_2 = Sha512Trunc256Sum::from_data(&hash_bytes.0); diff --git a/stackslib/src/clarity_vm/database/mod.rs b/stackslib/src/clarity_vm/database/mod.rs index 6f770f5927..e03149dba4 100644 --- a/stackslib/src/clarity_vm/database/mod.rs +++ b/stackslib/src/clarity_vm/database/mod.rs @@ -737,13 +737,15 @@ fn get_first_block_in_tenure( } } None => { - if let Some(_) = get_stacks_header_column_from_table( + if get_stacks_header_column_from_table( conn.conn(), id_bhh, "consensus_hash", &|r| ConsensusHash::from_row(r).expect("FATAL: malformed consensus_hash"), false, - ) { + ) + .is_some() + { return id_bhh.clone().into(); } else { get_stacks_header_column_from_table( diff --git a/stackslib/src/main.rs b/stackslib/src/main.rs index 2e63d0d128..90f6dfeecd 100644 --- a/stackslib/src/main.rs +++ b/stackslib/src/main.rs @@ -1523,7 +1523,7 @@ check if the associated microblocks can be downloaded while next_arrival < stacks_blocks_arrival_order.len() && known_stacks_blocks.contains(&stacks_block_id) { - if let Some(_) = stacks_blocks_available.get(&stacks_block_id) { + if stacks_blocks_available.get(&stacks_block_id).is_some() { // load up the block let stacks_block_opt = StacksChainState::load_block( &old_chainstate.blocks_path, diff --git a/stackslib/src/net/tests/inv/nakamoto.rs 
b/stackslib/src/net/tests/inv/nakamoto.rs index cb09236ccb..c313ede598 100644 --- a/stackslib/src/net/tests/inv/nakamoto.rs +++ b/stackslib/src/net/tests/inv/nakamoto.rs @@ -146,7 +146,7 @@ pub fn peer_get_nakamoto_invs<'a>( loop { peer.step_with_ibd(false).unwrap(); - if let Ok(..) = shutdown_recv.try_recv() { + if shutdown_recv.try_recv().is_ok() { break; } } From da64ceca3f2562e07ce6484e508dbd5c2de0650f Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Thu, 23 Jan 2025 13:47:38 -0500 Subject: [PATCH 20/82] chore: Apply Clippy lint `single_match` --- stackslib/src/burnchains/bitcoin/indexer.rs | 21 +- stackslib/src/burnchains/tests/mod.rs | 9 +- stackslib/src/chainstate/stacks/block.rs | 57 ++-- .../src/chainstate/stacks/boot/pox_2_tests.rs | 29 +- .../src/chainstate/stacks/boot/pox_3_tests.rs | 15 +- .../src/chainstate/stacks/boot/pox_4_tests.rs | 15 +- stackslib/src/chainstate/stacks/db/blocks.rs | 30 +-- stackslib/src/chainstate/stacks/db/mod.rs | 21 +- .../src/chainstate/stacks/index/cache.rs | 14 +- stackslib/src/chainstate/stacks/index/file.rs | 14 +- stackslib/src/chainstate/stacks/index/marf.rs | 5 +- .../src/chainstate/stacks/index/proofs.rs | 12 +- .../src/chainstate/stacks/index/storage.rs | 5 +- .../src/chainstate/stacks/index/test/marf.rs | 16 +- stackslib/src/chainstate/stacks/miner.rs | 39 ++- stackslib/src/chainstate/stacks/mod.rs | 7 +- stackslib/src/chainstate/stacks/tests/mod.rs | 21 +- .../src/chainstate/stacks/transaction.rs | 48 ++-- stackslib/src/core/mempool.rs | 7 +- stackslib/src/net/atlas/mod.rs | 69 +++-- stackslib/src/net/chat.rs | 29 +- stackslib/src/net/connection.rs | 36 ++- stackslib/src/net/dns.rs | 33 +-- stackslib/src/net/download/epoch2x.rs | 28 +- stackslib/src/net/http/request.rs | 34 +-- stackslib/src/net/httpcore.rs | 29 +- stackslib/src/net/inv/epoch2x.rs | 41 ++- stackslib/src/net/mod.rs | 28 +- stackslib/src/net/neighbors/mod.rs | 7 +- stackslib/src/net/p2p.rs | 67 ++--- stackslib/src/net/prune.rs | 37 +-- 
stackslib/src/net/relay.rs | 79 +++--- stackslib/src/net/server.rs | 87 +++--- stackslib/src/net/tests/convergence.rs | 85 +++--- stackslib/src/net/tests/download/epoch2x.rs | 88 ++---- stackslib/src/net/tests/inv/epoch2x.rs | 198 ++++++-------- stackslib/src/net/tests/inv/nakamoto.rs | 22 +- stackslib/src/net/tests/neighbors.rs | 255 ++++++------------ stackslib/src/net/tests/relay/epoch2x.rs | 25 +- stackslib/src/net/unsolicited.rs | 23 +- stackslib/src/util_lib/mod.rs | 11 +- 41 files changed, 663 insertions(+), 1033 deletions(-) diff --git a/stackslib/src/burnchains/bitcoin/indexer.rs b/stackslib/src/burnchains/bitcoin/indexer.rs index 129a4b5a91..509eb61e79 100644 --- a/stackslib/src/burnchains/bitcoin/indexer.rs +++ b/stackslib/src/burnchains/bitcoin/indexer.rs @@ -282,11 +282,8 @@ impl BitcoinIndexer { btc_error::ConnectionError })?; - match self.runtime.sock.take() { - Some(s) => { - let _ = s.shutdown(Shutdown::Both); - } - None => {} + if let Some(s) = self.runtime.sock.take() { + let _ = s.shutdown(Shutdown::Both); } self.runtime.sock = Some(s); @@ -294,11 +291,8 @@ impl BitcoinIndexer { } Err(_e) => { let s = self.runtime.sock.take(); - match s { - Some(s) => { - let _ = s.shutdown(Shutdown::Both); - } - None => {} + if let Some(s) = s { + let _ = s.shutdown(Shutdown::Both); } Err(btc_error::ConnectionError) } @@ -932,11 +926,8 @@ impl BitcoinIndexer { impl Drop for BitcoinIndexer { fn drop(&mut self) { - match self.runtime.sock { - Some(ref mut s) => { - let _ = s.shutdown(Shutdown::Both); - } - None => {} + if let Some(ref mut s) = self.runtime.sock { + let _ = s.shutdown(Shutdown::Both); } } } diff --git a/stackslib/src/burnchains/tests/mod.rs b/stackslib/src/burnchains/tests/mod.rs index 23232ac3b4..91ae93bb3f 100644 --- a/stackslib/src/burnchains/tests/mod.rs +++ b/stackslib/src/burnchains/tests/mod.rs @@ -580,12 +580,9 @@ impl TestBurnchainBlock { assert_eq!(parent_snapshot.block_height + 1, self.block_height); for i in 0..self.txs.len() { - match 
self.txs[i] { - BlockstackOperationType::LeaderKeyRegister(ref mut data) => { - assert_eq!(data.block_height, self.block_height); - data.consensus_hash = parent_snapshot.consensus_hash.clone(); - } - _ => {} + if let BlockstackOperationType::LeaderKeyRegister(ref mut data) = self.txs[i] { + assert_eq!(data.block_height, self.block_height); + data.consensus_hash = parent_snapshot.consensus_hash.clone(); } } } diff --git a/stackslib/src/chainstate/stacks/block.rs b/stackslib/src/chainstate/stacks/block.rs index a335e21894..1c231b8efc 100644 --- a/stackslib/src/chainstate/stacks/block.rs +++ b/stackslib/src/chainstate/stacks/block.rs @@ -353,16 +353,13 @@ impl StacksMessageCodec for StacksBlock { // must be only one coinbase let mut coinbase_count = 0; for tx in txs.iter() { - match tx.payload { - TransactionPayload::Coinbase(..) => { - coinbase_count += 1; - if coinbase_count > 1 { - return Err(codec_error::DeserializeError( - "Invalid block: multiple coinbases found".to_string(), - )); - } + if let TransactionPayload::Coinbase(..) = tx.payload { + coinbase_count += 1; + if coinbase_count > 1 { + return Err(codec_error::DeserializeError( + "Invalid block: multiple coinbases found".to_string(), + )); } - _ => {} } } @@ -518,26 +515,23 @@ impl StacksBlock { let mut found_coinbase = false; let mut coinbase_index = 0; for (i, tx) in txs.iter().enumerate() { - match tx.payload { - TransactionPayload::Coinbase(..) => { - if !check_present { - warn!("Found unexpected coinbase tx {}", tx.txid()); - return false; - } - - if found_coinbase { - warn!("Found duplicate coinbase tx {}", tx.txid()); - return false; - } - - if tx.anchor_mode != TransactionAnchorMode::OnChainOnly { - warn!("Invalid coinbase tx {}: not on-chain only", tx.txid()); - return false; - } - found_coinbase = true; - coinbase_index = i; + if let TransactionPayload::Coinbase(..) 
= tx.payload { + if !check_present { + warn!("Found unexpected coinbase tx {}", tx.txid()); + return false; + } + + if found_coinbase { + warn!("Found duplicate coinbase tx {}", tx.txid()); + return false; } - _ => {} + + if tx.anchor_mode != TransactionAnchorMode::OnChainOnly { + warn!("Invalid coinbase tx {}: not on-chain only", tx.txid()); + return false; + } + found_coinbase = true; + coinbase_index = i; } } @@ -1150,11 +1144,8 @@ mod test { let mut txs_anchored = vec![]; for tx in all_txs.iter() { - match tx.payload { - TransactionPayload::Coinbase(..) => { - continue; - } - _ => {} + if let TransactionPayload::Coinbase(..) = tx.payload { + continue; } txs_anchored.push(tx); } diff --git a/stackslib/src/chainstate/stacks/boot/pox_2_tests.rs b/stackslib/src/chainstate/stacks/boot/pox_2_tests.rs index 3313e80c7f..4256fba3b9 100644 --- a/stackslib/src/chainstate/stacks/boot/pox_2_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/pox_2_tests.rs @@ -1371,23 +1371,20 @@ fn test_simple_pox_2_auto_unlock(alice_first: bool) { coinbase_txs.push(r); continue; } - match r.transaction { - TransactionOrigin::Stacks(ref t) => { - let addr = t.auth.origin().address_testnet(); - eprintln!("TX addr: {}", addr); - if addr == alice_address { - alice_txs.insert(t.auth.get_origin_nonce(), r); - } else if addr == bob_address { - bob_txs.insert(t.auth.get_origin_nonce(), r); - } else if addr == charlie_address { - assert!( - r.execution_cost != ExecutionCost::ZERO, - "Execution cost is not zero!" - ); - charlie_txs.insert(t.auth.get_origin_nonce(), r); - } + if let TransactionOrigin::Stacks(ref t) = r.transaction { + let addr = t.auth.origin().address_testnet(); + eprintln!("TX addr: {}", addr); + if addr == alice_address { + alice_txs.insert(t.auth.get_origin_nonce(), r); + } else if addr == bob_address { + bob_txs.insert(t.auth.get_origin_nonce(), r); + } else if addr == charlie_address { + assert!( + r.execution_cost != ExecutionCost::ZERO, + "Execution cost is not zero!" 
+ ); + charlie_txs.insert(t.auth.get_origin_nonce(), r); } - _ => {} } } } diff --git a/stackslib/src/chainstate/stacks/boot/pox_3_tests.rs b/stackslib/src/chainstate/stacks/boot/pox_3_tests.rs index 5c52297969..d42095b923 100644 --- a/stackslib/src/chainstate/stacks/boot/pox_3_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/pox_3_tests.rs @@ -930,16 +930,13 @@ fn pox_auto_unlock(alice_first: bool) { coinbase_txs.push(r); continue; } - match r.transaction { - TransactionOrigin::Stacks(ref t) => { - let addr = t.auth.origin().address_testnet(); - if addr == alice_address { - alice_txs.insert(t.auth.get_origin_nonce(), r); - } else if addr == bob_address { - bob_txs.insert(t.auth.get_origin_nonce(), r); - } + if let TransactionOrigin::Stacks(ref t) = r.transaction { + let addr = t.auth.origin().address_testnet(); + if addr == alice_address { + alice_txs.insert(t.auth.get_origin_nonce(), r); + } else if addr == bob_address { + bob_txs.insert(t.auth.get_origin_nonce(), r); } - _ => {} } } } diff --git a/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs b/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs index 840e7a2c54..5005dd8781 100644 --- a/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs @@ -9184,16 +9184,13 @@ fn missed_slots_no_unlock() { coinbase_txs.push(r); continue; } - match r.transaction { - TransactionOrigin::Stacks(ref t) => { - let addr = t.auth.origin().address_testnet(); - if addr == alice_address { - alice_txs.insert(t.auth.get_origin_nonce(), r); - } else if addr == bob_address { - bob_txs.insert(t.auth.get_origin_nonce(), r); - } + if let TransactionOrigin::Stacks(ref t) = r.transaction { + let addr = t.auth.origin().address_testnet(); + if addr == alice_address { + alice_txs.insert(t.auth.get_origin_nonce(), r); + } else if addr == bob_address { + bob_txs.insert(t.auth.get_origin_nonce(), r); } - _ => {} } } } diff --git a/stackslib/src/chainstate/stacks/db/blocks.rs 
b/stackslib/src/chainstate/stacks/db/blocks.rs index d3a4dc8a5a..6c3c745a45 100644 --- a/stackslib/src/chainstate/stacks/db/blocks.rs +++ b/stackslib/src/chainstate/stacks/db/blocks.rs @@ -11206,15 +11206,12 @@ pub mod test { let (_, burn_header_hash, consensus_hash) = peer.next_burnchain_block(burn_ops.clone()); - match (stacks_block_opt, microblocks_opt) { - (Some(stacks_block), Some(microblocks)) => { - peer.process_stacks_epoch_at_tip(&stacks_block, µblocks); - last_block_id = StacksBlockHeader::make_index_block_hash( - &consensus_hash, - &stacks_block.block_hash(), - ); - } - _ => {} + if let (Some(stacks_block), Some(microblocks)) = (stacks_block_opt, microblocks_opt) { + peer.process_stacks_epoch_at_tip(&stacks_block, µblocks); + last_block_id = StacksBlockHeader::make_index_block_hash( + &consensus_hash, + &stacks_block.block_hash(), + ); } let tip = @@ -11889,15 +11886,12 @@ pub mod test { let (_, burn_header_hash, consensus_hash) = peer.next_burnchain_block(burn_ops.clone()); - match (stacks_block_opt, microblocks_opt) { - (Some(stacks_block), Some(microblocks)) => { - peer.process_stacks_epoch_at_tip(&stacks_block, µblocks); - last_block_id = StacksBlockHeader::make_index_block_hash( - &consensus_hash, - &stacks_block.block_hash(), - ); - } - _ => {} + if let (Some(stacks_block), Some(microblocks)) = (stacks_block_opt, microblocks_opt) { + peer.process_stacks_epoch_at_tip(&stacks_block, µblocks); + last_block_id = StacksBlockHeader::make_index_block_hash( + &consensus_hash, + &stacks_block.block_hash(), + ); } let tip = diff --git a/stackslib/src/chainstate/stacks/db/mod.rs b/stackslib/src/chainstate/stacks/db/mod.rs index c09b2fcbab..1d7c97b676 100644 --- a/stackslib/src/chainstate/stacks/db/mod.rs +++ b/stackslib/src/chainstate/stacks/db/mod.rs @@ -2746,11 +2746,8 @@ pub mod test { balances: Vec<(StacksAddress, u64)>, ) -> StacksChainState { let path = chainstate_path(test_name); - match fs::metadata(&path) { - Ok(_) => { - 
fs::remove_dir_all(&path).unwrap(); - } - Err(_) => {} + if let Ok(_) = fs::metadata(&path) { + fs::remove_dir_all(&path).unwrap(); }; let initial_balances = balances @@ -2866,11 +2863,8 @@ pub mod test { }; let path = chainstate_path(function_name!()); - match fs::metadata(&path) { - Ok(_) => { - fs::remove_dir_all(&path).unwrap(); - } - Err(_) => {} + if let Ok(_) = fs::metadata(&path) { + fs::remove_dir_all(&path).unwrap(); }; let mut chainstate = @@ -2956,11 +2950,8 @@ pub mod test { }; let path = chainstate_path(function_name!()); - match fs::metadata(&path) { - Ok(_) => { - fs::remove_dir_all(&path).unwrap(); - } - Err(_) => {} + if let Ok(_) = fs::metadata(&path) { + fs::remove_dir_all(&path).unwrap(); }; let mut chainstate = diff --git a/stackslib/src/chainstate/stacks/index/cache.rs b/stackslib/src/chainstate/stacks/index/cache.rs index d5ba5ae5f6..2d5cd556b8 100644 --- a/stackslib/src/chainstate/stacks/index/cache.rs +++ b/stackslib/src/chainstate/stacks/index/cache.rs @@ -258,12 +258,11 @@ impl TrieCache { TrieCache::Everything(ref mut state) => { state.store_node_and_hash(block_id, trieptr, node, hash); } - TrieCache::Node256(ref mut state) => match node { - TrieNodeType::Node256(data) => { + TrieCache::Node256(ref mut state) => { + if let TrieNodeType::Node256(data) = node { state.store_node_and_hash(block_id, trieptr, TrieNodeType::Node256(data), hash); } - _ => {} - }, + } } } @@ -273,12 +272,11 @@ impl TrieCache { match self { TrieCache::Noop(_) => {} TrieCache::Everything(ref mut state) => state.store_node(block_id, trieptr, node), - TrieCache::Node256(ref mut state) => match node { - TrieNodeType::Node256(data) => { + TrieCache::Node256(ref mut state) => { + if let TrieNodeType::Node256(data) = node { state.store_node(block_id, trieptr, TrieNodeType::Node256(data)) } - _ => {} - }, + } } } diff --git a/stackslib/src/chainstate/stacks/index/file.rs b/stackslib/src/chainstate/stacks/index/file.rs index 3940cb594e..52f571aa1f 100644 --- 
a/stackslib/src/chainstate/stacks/index/file.rs +++ b/stackslib/src/chainstate/stacks/index/file.rs @@ -194,11 +194,8 @@ impl TrieFile { .map(|stat| Some(stat.len())) .unwrap_or(None); - match (size_before_opt, size_after_opt) { - (Some(sz_before), Some(sz_after)) => { - debug!("Shrank DB from {} to {} bytes", sz_before, sz_after); - } - _ => {} + if let (Some(sz_before), Some(sz_after)) = (size_before_opt, size_after_opt) { + debug!("Shrank DB from {} to {} bytes", sz_before, sz_after); } Ok(()) @@ -461,11 +458,8 @@ impl TrieFile { self.write_all(buf)?; self.flush()?; - match self { - TrieFile::Disk(ref mut data) => { - data.fd.sync_data()?; - } - _ => {} + if let TrieFile::Disk(ref mut data) = self { + data.fd.sync_data()?; } Ok(offset) } diff --git a/stackslib/src/chainstate/stacks/index/marf.rs b/stackslib/src/chainstate/stacks/index/marf.rs index de1488d057..cfb5a97594 100644 --- a/stackslib/src/chainstate/stacks/index/marf.rs +++ b/stackslib/src/chainstate/stacks/index/marf.rs @@ -1291,9 +1291,8 @@ impl MARF { // used in testing in order to short-circuit block-height lookups // when the trie struct is tested outside of marf.rs usage if height == 0 { - match storage.test_genesis_block { - Some(ref s) => return Ok(Some(s.clone())), - _ => {} + if let Some(ref s) = storage.test_genesis_block { + return Ok(Some(s.clone())); } } } diff --git a/stackslib/src/chainstate/stacks/index/proofs.rs b/stackslib/src/chainstate/stacks/index/proofs.rs index 37ff420437..e7ba01a6bf 100644 --- a/stackslib/src/chainstate/stacks/index/proofs.rs +++ b/stackslib/src/chainstate/stacks/index/proofs.rs @@ -1213,12 +1213,12 @@ impl TrieMerkleProof { }; // next proof item should be part of a segment proof - match proof[i] { - TrieMerkleProofType::Shunt(_) => { - test_debug!("Malformed proof -- exepcted segment proof following first shunt proof head at {}", i); - return false; - } - _ => {} + if let TrieMerkleProofType::Shunt(_) = proof[i] { + test_debug!( + "Malformed proof -- exepcted 
segment proof following first shunt proof head at {}", + i + ); + return false; } while i < proof.len() { diff --git a/stackslib/src/chainstate/stacks/index/storage.rs b/stackslib/src/chainstate/stacks/index/storage.rs index 79b391ce42..fb9637c799 100644 --- a/stackslib/src/chainstate/stacks/index/storage.rs +++ b/stackslib/src/chainstate/stacks/index/storage.rs @@ -1887,9 +1887,8 @@ impl<'a, T: MarfTrieId> TrieStorageTransaction<'a, T> { // blow away db trie_sql::clear_tables(self.sqlite_tx())?; - match self.data.uncommitted_writes { - Some((_, ref mut trie_storage)) => trie_storage.format()?, - None => {} + if let Some((_, ref mut trie_storage)) = self.data.uncommitted_writes { + trie_storage.format()? }; self.data.set_block(T::sentinel(), None); diff --git a/stackslib/src/chainstate/stacks/index/test/marf.rs b/stackslib/src/chainstate/stacks/index/test/marf.rs index 4f2b06a480..7102527ba8 100644 --- a/stackslib/src/chainstate/stacks/index/test/marf.rs +++ b/stackslib/src/chainstate/stacks/index/test/marf.rs @@ -1282,11 +1282,8 @@ fn marf_insert_random_10485760_4096_file_storage() { } let path = "/tmp/rust_marf_insert_random_10485760_4096_file_storage".to_string(); - match fs::metadata(&path) { - Ok(_) => { - fs::remove_dir_all(&path).unwrap(); - } - Err(_) => {} + if let Ok(_) = fs::metadata(&path) { + fs::remove_dir_all(&path).unwrap(); }; let marf_opts = MARFOpenOpts::default(); let f = TrieFileStorage::open(&path, marf_opts).unwrap(); @@ -1567,12 +1564,9 @@ fn marf_read_random_1048576_4096_file_storage() { for marf_opts in MARFOpenOpts::all().into_iter() { test_debug!("With {:?}", &marf_opts); let path = "/tmp/rust_marf_insert_random_1048576_4096_file_storage".to_string(); - match fs::metadata(&path) { - Err(_) => { - eprintln!("Run the marf_insert_random_1048576_4096_file_storage test first"); - return; - } - Ok(_) => {} + if let Err(_) = fs::metadata(&path) { + eprintln!("Run the marf_insert_random_1048576_4096_file_storage test first"); + return; }; let 
marf_opts = MARFOpenOpts::default(); let mut f_store = TrieFileStorage::new_memory(marf_opts).unwrap(); diff --git a/stackslib/src/chainstate/stacks/miner.rs b/stackslib/src/chainstate/stacks/miner.rs index 43fd6b3c18..aca3f9d84c 100644 --- a/stackslib/src/chainstate/stacks/miner.rs +++ b/stackslib/src/chainstate/stacks/miner.rs @@ -1147,24 +1147,20 @@ impl<'a> StacksMicroblockBuilder<'a> { TransactionResult::Skipped(TransactionSkipped { error, .. }) | TransactionResult::ProcessingError(TransactionError { error, .. }) => { test_debug!("Exclude tx {} from microblock", tx.txid()); - match &error { - Error::BlockTooBigError => { - // done mining -- our execution budget is exceeded. - // Make the block from the transactions we did manage to get - test_debug!("Block budget exceeded on tx {}", &tx.txid()); - if block_limit_hit == BlockLimitFunction::NO_LIMIT_HIT { - test_debug!("Switch to mining stx-transfers only"); - block_limit_hit = BlockLimitFunction::CONTRACT_LIMIT_HIT; - } else if block_limit_hit - == BlockLimitFunction::CONTRACT_LIMIT_HIT - { - test_debug!( - "Stop mining microblock block due to limit exceeded" - ); - break; - } + if let Error::BlockTooBigError = &error { + // done mining -- our execution budget is exceeded. 
+ // Make the block from the transactions we did manage to get + test_debug!("Block budget exceeded on tx {}", &tx.txid()); + if block_limit_hit == BlockLimitFunction::NO_LIMIT_HIT { + test_debug!("Switch to mining stx-transfers only"); + block_limit_hit = BlockLimitFunction::CONTRACT_LIMIT_HIT; + } else if block_limit_hit == BlockLimitFunction::CONTRACT_LIMIT_HIT + { + test_debug!( + "Stop mining microblock block due to limit exceeded" + ); + break; } - _ => {} } continue; } @@ -1198,12 +1194,9 @@ impl<'a> StacksMicroblockBuilder<'a> { self.runtime.considered.replace(considered); self.runtime.num_mined = num_txs; - match result { - Err(e) => { - warn!("Error producing microblock: {}", e); - return Err(e); - } - _ => {} + if let Err(e) = result { + warn!("Error producing microblock: {}", e); + return Err(e); } return self.make_next_microblock(txs_included, miner_key, tx_events, None); diff --git a/stackslib/src/chainstate/stacks/mod.rs b/stackslib/src/chainstate/stacks/mod.rs index 23990fe199..546ab6bd08 100644 --- a/stackslib/src/chainstate/stacks/mod.rs +++ b/stackslib/src/chainstate/stacks/mod.rs @@ -1590,11 +1590,8 @@ pub mod test { } for tx in all_txs.into_iter() { - match tx.payload { - TransactionPayload::Coinbase(..) => { - continue; - } - _ => {} + if let TransactionPayload::Coinbase(..) 
= tx.payload { + continue; } txs_anchored.push(tx); if txs_anchored.len() >= num_txs { diff --git a/stackslib/src/chainstate/stacks/tests/mod.rs b/stackslib/src/chainstate/stacks/tests/mod.rs index 54dcea1c7e..d119dacd8e 100644 --- a/stackslib/src/chainstate/stacks/tests/mod.rs +++ b/stackslib/src/chainstate/stacks/tests/mod.rs @@ -338,11 +338,8 @@ impl TestStacksNode { panic!("Tried to fork an unforkable chainstate instance"); } - match fs::metadata(&chainstate_path(new_test_name)) { - Ok(_) => { - fs::remove_dir_all(&chainstate_path(new_test_name)).unwrap(); - } - Err(_) => {} + if let Ok(_) = fs::metadata(&chainstate_path(new_test_name)) { + fs::remove_dir_all(&chainstate_path(new_test_name)).unwrap(); } copy_dir( @@ -525,17 +522,14 @@ impl TestStacksNode { miner: &TestMiner, ) -> Option { for commit_op in miner.block_commits.iter().rev() { - match SortitionDB::get_block_snapshot_for_winning_stacks_block( + if let Some(sn) = SortitionDB::get_block_snapshot_for_winning_stacks_block( ic, &fork_tip.sortition_id, &commit_op.block_header_hash, ) .unwrap() { - Some(sn) => { - return Some(sn); - } - None => {} + return Some(sn); } } return None; @@ -1424,11 +1418,8 @@ pub fn instantiate_and_exec( post_flight_callback: Option>, ) -> StacksChainState { let path = chainstate_path(test_name); - match fs::metadata(&path) { - Ok(_) => { - fs::remove_dir_all(&path).unwrap(); - } - Err(_) => {} + if let Ok(_) = fs::metadata(&path) { + fs::remove_dir_all(&path).unwrap(); }; let initial_balances = balances diff --git a/stackslib/src/chainstate/stacks/transaction.rs b/stackslib/src/chainstate/stacks/transaction.rs index c0fa7f1727..0308a8124b 100644 --- a/stackslib/src/chainstate/stacks/transaction.rs +++ b/stackslib/src/chainstate/stacks/transaction.rs @@ -1130,17 +1130,14 @@ impl StacksTransactionSigner { } pub fn sign_sponsor(&mut self, privk: &StacksPrivateKey) -> Result<(), net_error> { - match self.tx.auth { - TransactionAuth::Sponsored(_, ref sponsor_condition) => { - if 
self.check_oversign - && sponsor_condition.num_signatures() >= sponsor_condition.signatures_required() - { - return Err(net_error::SigningError( - "Sponsor would have too many signatures".to_string(), - )); - } + if let TransactionAuth::Sponsored(_, ref sponsor_condition) = self.tx.auth { + if self.check_oversign + && sponsor_condition.num_signatures() >= sponsor_condition.signatures_required() + { + return Err(net_error::SigningError( + "Sponsor would have too many signatures".to_string(), + )); } - _ => {} } let next_sighash = self.tx.sign_next_sponsor(&self.sighash, privk)?; @@ -1933,24 +1930,21 @@ mod test { // test_debug!("mutate byte {}", &i); let mut cursor = io::Cursor::new(&tx_bytes); let mut reader = LogReader::from_reader(&mut cursor); - match StacksTransaction::consensus_deserialize(&mut reader) { - Ok(corrupt_tx) => { - let mut corrupt_tx_bytes = vec![]; - corrupt_tx - .consensus_serialize(&mut corrupt_tx_bytes) - .unwrap(); - if corrupt_tx_bytes.len() < tx_bytes.len() { - // didn't parse fully; the block-parsing logic would reject this block. - tx_bytes[i] = next_byte as u8; - continue; - } - if corrupt_tx.verify().is_ok() && corrupt_tx != *signed_tx { - eprintln!("corrupt tx: {:#?}", &corrupt_tx); - eprintln!("signed tx: {:#?}", &signed_tx); - assert!(false); - } + if let Ok(corrupt_tx) = StacksTransaction::consensus_deserialize(&mut reader) { + let mut corrupt_tx_bytes = vec![]; + corrupt_tx + .consensus_serialize(&mut corrupt_tx_bytes) + .unwrap(); + if corrupt_tx_bytes.len() < tx_bytes.len() { + // didn't parse fully; the block-parsing logic would reject this block. 
+ tx_bytes[i] = next_byte as u8; + continue; + } + if corrupt_tx.verify().is_ok() && corrupt_tx != *signed_tx { + eprintln!("corrupt tx: {:#?}", &corrupt_tx); + eprintln!("signed tx: {:#?}", &signed_tx); + assert!(false); } - Err(_) => {} } // restore tx_bytes[i] = next_byte as u8; diff --git a/stackslib/src/core/mempool.rs b/stackslib/src/core/mempool.rs index 589b624abe..17848fa2d2 100644 --- a/stackslib/src/core/mempool.rs +++ b/stackslib/src/core/mempool.rs @@ -1092,11 +1092,8 @@ impl NonceCache { }; // In-memory cache - match self.cache.get_mut(&address) { - Some(nonce) => { - *nonce = value; - } - None => (), + if let Some(nonce) = self.cache.get_mut(&address) { + *nonce = value; } success diff --git a/stackslib/src/net/atlas/mod.rs b/stackslib/src/net/atlas/mod.rs index c382aa618d..49d1036a0b 100644 --- a/stackslib/src/net/atlas/mod.rs +++ b/stackslib/src/net/atlas/mod.rs @@ -195,45 +195,42 @@ impl AttachmentInstance { ) -> Option { if let Value::Tuple(ref attachment) = value { if let Ok(Value::Tuple(ref attachment_data)) = attachment.get("attachment") { - match ( + if let ( + Ok(Value::Sequence(SequenceData::Buffer(content_hash))), + Ok(Value::UInt(attachment_index)), + ) = ( attachment_data.get("hash"), attachment_data.get("attachment-index"), ) { - ( - Ok(Value::Sequence(SequenceData::Buffer(content_hash))), - Ok(Value::UInt(attachment_index)), - ) => { - let content_hash = if content_hash.data.is_empty() { - Hash160::empty() - } else { - match Hash160::from_bytes(&content_hash.data[..]) { - Some(content_hash) => content_hash, - _ => return None, - } - }; - let metadata = match attachment_data.get("metadata") { - Ok(metadata) => { - let mut serialized = vec![]; - metadata - .consensus_serialize(&mut serialized) - .expect("FATAL: invalid metadata"); - to_hex(&serialized[..]) - } - _ => String::new(), - }; - let instance = AttachmentInstance { - index_block_hash, - content_hash, - attachment_index: *attachment_index as u32, - stacks_block_height, - 
metadata, - contract_id: contract_id.clone(), - tx_id, - canonical_stacks_tip_height, - }; - return Some(instance); - } - _ => {} + let content_hash = if content_hash.data.is_empty() { + Hash160::empty() + } else { + match Hash160::from_bytes(&content_hash.data[..]) { + Some(content_hash) => content_hash, + _ => return None, + } + }; + let metadata = match attachment_data.get("metadata") { + Ok(metadata) => { + let mut serialized = vec![]; + metadata + .consensus_serialize(&mut serialized) + .expect("FATAL: invalid metadata"); + to_hex(&serialized[..]) + } + _ => String::new(), + }; + let instance = AttachmentInstance { + index_block_hash, + content_hash, + attachment_index: *attachment_index as u32, + stacks_block_height, + metadata, + contract_id: contract_id.clone(), + tx_id, + canonical_stacks_tip_height, + }; + return Some(instance); } } } diff --git a/stackslib/src/net/chat.rs b/stackslib/src/net/chat.rs index 2dea34245b..0ce27038cd 100644 --- a/stackslib/src/net/chat.rs +++ b/stackslib/src/net/chat.rs @@ -515,13 +515,12 @@ impl Neighbor { // setting BLOCKSTACK_NEIGHBOR_TEST_${PORTNUMBER} will let us select an organization // for this peer use std::env; - match env::var(format!("BLOCKSTACK_NEIGHBOR_TEST_{}", addr.port).to_string()) { - Ok(asn_str) => { - neighbor.asn = asn_str.parse().unwrap(); - neighbor.org = neighbor.asn; - test_debug!("Override {:?} to ASN/org {}", &neighbor.addr, neighbor.asn); - } - Err(_) => {} + if let Ok(asn_str) = + env::var(format!("BLOCKSTACK_NEIGHBOR_TEST_{}", addr.port).to_string()) + { + neighbor.asn = asn_str.parse().unwrap(); + neighbor.org = neighbor.asn; + test_debug!("Override {:?} to ASN/org {}", &neighbor.addr, neighbor.asn); }; } @@ -544,13 +543,10 @@ impl Neighbor { let asn_opt = PeerDB::asn_lookup(conn, &addr.addrbytes).map_err(net_error::DBError)?; - match asn_opt { - Some(a) => { - if a != 0 { - peer.asn = a; - } + if let Some(a) = asn_opt { + if a != 0 { + peer.asn = a; } - None => {} }; } Ok(Some(peer)) @@ 
-3110,11 +3106,8 @@ mod test { services: u16, ) -> (PeerDB, SortitionDB, StackerDBs, PoxId, StacksChainState) { let test_path = format!("/tmp/stacks-test-databases-{}", testname); - match fs::metadata(&test_path) { - Ok(_) => { - fs::remove_dir_all(&test_path).unwrap(); - } - Err(_) => {} + if let Ok(_) = fs::metadata(&test_path) { + fs::remove_dir_all(&test_path).unwrap(); }; fs::create_dir_all(&test_path).unwrap(); diff --git a/stackslib/src/net/connection.rs b/stackslib/src/net/connection.rs index 1d0eabdd14..0fe48a678b 100644 --- a/stackslib/src/net/connection.rs +++ b/stackslib/src/net/connection.rs @@ -926,19 +926,16 @@ impl ConnectionInbox

{ let bytes_consumed = if let Some(ref mut preamble) = preamble_opt { let (message_opt, bytes_consumed) = self.consume_payload(protocol, preamble, &buf[offset..])?; - match message_opt { - Some(message) => { - // queue up - test_debug!( - "Consumed message '{}' (request {}) in {} bytes", - message.get_message_name(), - message.request_id(), - bytes_consumed - ); - self.inbox.push_back(message); - consumed_message = true; - } - None => {} + if let Some(message) = message_opt { + // queue up + test_debug!( + "Consumed message '{}' (request {}) in {} bytes", + message.get_message_name(), + message.request_id(), + bytes_consumed + ); + self.inbox.push_back(message); + consumed_message = true; }; bytes_consumed @@ -982,14 +979,11 @@ impl ConnectionInbox

{ if let Some(ref mut preamble) = preamble_opt { let (message_opt, _bytes_consumed) = self.consume_payload(protocol, preamble, &[])?; - match message_opt { - Some(message) => { - // queue up - test_debug!("Consumed buffered message '{}' (request {}) from {} input buffer bytes", message.get_message_name(), message.request_id(), _bytes_consumed); - self.inbox.push_back(message); - consumed_message = true; - } - None => {} + if let Some(message) = message_opt { + // queue up + test_debug!("Consumed buffered message '{}' (request {}) from {} input buffer bytes", message.get_message_name(), message.request_id(), _bytes_consumed); + self.inbox.push_back(message); + consumed_message = true; } } self.preamble = preamble_opt; diff --git a/stackslib/src/net/dns.rs b/stackslib/src/net/dns.rs index 6529001d7d..77c401d5e2 100644 --- a/stackslib/src/net/dns.rs +++ b/stackslib/src/net/dns.rs @@ -377,13 +377,10 @@ mod test { let mut resolved_addrs = None; loop { client.try_recv().unwrap(); - match client.poll_lookup("www.google.com", 80).unwrap() { - Some(addrs) => { - test_debug!("addrs: {:?}", &addrs); - resolved_addrs = Some(addrs); - break; - } - None => {} + if let Some(addrs) = client.poll_lookup("www.google.com", 80).unwrap() { + test_debug!("addrs: {:?}", &addrs); + resolved_addrs = Some(addrs); + break; } sleep_ms(100); } @@ -423,13 +420,10 @@ mod test { if resolved_addrs.contains_key(&name.to_string()) { continue; } - match client.poll_lookup(name, 80).unwrap() { - Some(addrs) => { - test_debug!("name {} addrs: {:?}", name, &addrs); - resolved_addrs.insert(name.to_string(), addrs); - break; - } - None => {} + if let Some(addrs) = client.poll_lookup(name, 80).unwrap() { + test_debug!("name {} addrs: {:?}", name, &addrs); + resolved_addrs.insert(name.to_string(), addrs); + break; } } @@ -452,13 +446,10 @@ mod test { let mut resolved_error = None; loop { client.try_recv().unwrap(); - match client.poll_lookup("asdfjkl;", 80).unwrap() { - Some(resp) => { - test_debug!("addrs: 
{:?}", &resp); - resolved_error = Some(resp); - break; - } - None => {} + if let Some(resp) = client.poll_lookup("asdfjkl;", 80).unwrap() { + test_debug!("addrs: {:?}", &resp); + resolved_error = Some(resp); + break; } sleep_ms(100); } diff --git a/stackslib/src/net/download/epoch2x.rs b/stackslib/src/net/download/epoch2x.rs index 6d0bb63d5a..06f4e146fa 100644 --- a/stackslib/src/net/download/epoch2x.rs +++ b/stackslib/src/net/download/epoch2x.rs @@ -1045,9 +1045,8 @@ impl PeerNetwork { /// Pass a hint to the downloader to re-scan pub fn hint_download_rescan(&mut self, target_height: u64, ibd: bool) { - match self.block_downloader { - Some(ref mut dl) => dl.hint_download_rescan(target_height, ibd), - None => {} + if let Some(ref mut dl) = self.block_downloader { + dl.hint_download_rescan(target_height, ibd) } } @@ -1978,11 +1977,10 @@ impl PeerNetwork { for sortition_height in priority.into_iter() { match downloader.blocks_to_try.get_mut(&sortition_height) { Some(ref mut keys) => { - match PeerNetwork::begin_request(network, &downloader.dns_lookups, keys) { - Some((key, handle)) => { - requests.insert(key.clone(), handle); - } - None => {} + if let Some((key, handle)) = + PeerNetwork::begin_request(network, &downloader.dns_lookups, keys) + { + requests.insert(key.clone(), handle); } } None => { @@ -2016,11 +2014,10 @@ impl PeerNetwork { for sortition_height in priority.into_iter() { match downloader.microblocks_to_try.get_mut(&sortition_height) { Some(ref mut keys) => { - match PeerNetwork::begin_request(network, &downloader.dns_lookups, keys) { - Some((key, handle)) => { - requests.insert(key.clone(), handle); - } - None => {} + if let Some((key, handle)) = + PeerNetwork::begin_request(network, &downloader.dns_lookups, keys) + { + requests.insert(key.clone(), handle); } } None => { @@ -2480,9 +2477,8 @@ impl PeerNetwork { if done { // reset state if we're done - match self.block_downloader { - Some(ref mut downloader) => downloader.reset(), - None => {} + if let 
Some(ref mut downloader) = self.block_downloader { + downloader.reset() } } diff --git a/stackslib/src/net/http/request.rs b/stackslib/src/net/http/request.rs index e2d0fd16f3..13daa56cab 100644 --- a/stackslib/src/net/http/request.rs +++ b/stackslib/src/net/http/request.rs @@ -273,29 +273,23 @@ impl StacksMessageCodec for HttpRequestPreamble { .map_err(CodecError::WriteError)?; // content-type - match self.content_type { - Some(ref c) => { - fd.write_all("Content-Type: ".as_bytes()) - .map_err(CodecError::WriteError)?; - fd.write_all(c.to_string().as_str().as_bytes()) - .map_err(CodecError::WriteError)?; - fd.write_all("\r\n".as_bytes()) - .map_err(CodecError::WriteError)?; - } - None => {} + if let Some(ref c) = self.content_type { + fd.write_all("Content-Type: ".as_bytes()) + .map_err(CodecError::WriteError)?; + fd.write_all(c.to_string().as_str().as_bytes()) + .map_err(CodecError::WriteError)?; + fd.write_all("\r\n".as_bytes()) + .map_err(CodecError::WriteError)?; } // content-length - match self.content_length { - Some(l) => { - fd.write_all("Content-Length: ".as_bytes()) - .map_err(CodecError::WriteError)?; - fd.write_all(format!("{}", l).as_bytes()) - .map_err(CodecError::WriteError)?; - fd.write_all("\r\n".as_bytes()) - .map_err(CodecError::WriteError)?; - } - None => {} + if let Some(l) = self.content_length { + fd.write_all("Content-Length: ".as_bytes()) + .map_err(CodecError::WriteError)?; + fd.write_all(format!("{}", l).as_bytes()) + .map_err(CodecError::WriteError)?; + fd.write_all("\r\n".as_bytes()) + .map_err(CodecError::WriteError)?; } // keep-alive diff --git a/stackslib/src/net/httpcore.rs b/stackslib/src/net/httpcore.rs index a38f35c005..919d7ffa1c 100644 --- a/stackslib/src/net/httpcore.rs +++ b/stackslib/src/net/httpcore.rs @@ -1232,25 +1232,22 @@ impl StacksHttp { /// This method will set up this state machine to consume the message associated with this /// premable, if the response is chunked. 
fn set_preamble(&mut self, preamble: &StacksHttpPreamble) -> Result<(), NetError> { - match preamble { - StacksHttpPreamble::Response(ref http_response_preamble) => { - // we can only receive a response if we're expecting it - if self.request_handler_index.is_none() && !self.allow_arbitrary_response { - return Err(NetError::DeserializeError( - "Unexpected HTTP response: no active request handler".to_string(), - )); + if let StacksHttpPreamble::Response(ref http_response_preamble) = preamble { + // we can only receive a response if we're expecting it + if self.request_handler_index.is_none() && !self.allow_arbitrary_response { + return Err(NetError::DeserializeError( + "Unexpected HTTP response: no active request handler".to_string(), + )); + } + if http_response_preamble.is_chunked() { + // we can only receive one response at a time + if self.reply.is_some() { + test_debug!("Have pending reply already"); + return Err(NetError::InProgress); } - if http_response_preamble.is_chunked() { - // we can only receive one response at a time - if self.reply.is_some() { - test_debug!("Have pending reply already"); - return Err(NetError::InProgress); - } - self.set_pending(http_response_preamble); - } + self.set_pending(http_response_preamble); } - _ => {} } Ok(()) } diff --git a/stackslib/src/net/inv/epoch2x.rs b/stackslib/src/net/inv/epoch2x.rs index 9b9e7b3682..449d2e26e7 100644 --- a/stackslib/src/net/inv/epoch2x.rs +++ b/stackslib/src/net/inv/epoch2x.rs @@ -1534,15 +1534,12 @@ impl PeerNetwork { } // does the peer agree with our PoX view up to this reward cycle? 
- match stats.inv.pox_inv_cmp(&self.pox_id) { - Some((disagreed, _, _)) => { - if disagreed < target_block_reward_cycle { - // can't proceed - debug!("{:?}: remote neighbor {:?} disagrees with our PoX inventory at reward cycle {} (asked for {})", &self.local_peer, nk, disagreed, target_block_reward_cycle); - return Ok(0); - } + if let Some((disagreed, _, _)) = stats.inv.pox_inv_cmp(&self.pox_id) { + if disagreed < target_block_reward_cycle { + // can't proceed + debug!("{:?}: remote neighbor {:?} disagrees with our PoX inventory at reward cycle {} (asked for {})", &self.local_peer, nk, disagreed, target_block_reward_cycle); + return Ok(0); } - None => {} } let target_block_height = self @@ -2523,13 +2520,10 @@ impl PeerNetwork { let mut cur_neighbors = HashSet::new(); for (nk, event_id) in self.events.iter() { // only outbound authenticated peers - match self.peers.get(event_id) { - Some(convo) => { - if convo.is_outbound() && convo.is_authenticated() { - cur_neighbors.insert(nk.clone()); - } + if let Some(convo) = self.peers.get(event_id) { + if convo.is_outbound() && convo.is_authenticated() { + cur_neighbors.insert(nk.clone()); } - None => {} } } @@ -2543,17 +2537,14 @@ impl PeerNetwork { /// Set a hint that we learned something new, and need to sync invs again pub fn hint_sync_invs(&mut self, target_height: u64) { - match self.inv_state { - Some(ref mut inv_state) => { - debug!( - "Awaken inv sync to re-scan peer block inventories at height {}", - target_height - ); - inv_state.hint_learned_data = true; - inv_state.hint_do_rescan = true; - inv_state.hint_learned_data_height = target_height; - } - None => {} + if let Some(ref mut inv_state) = self.inv_state { + debug!( + "Awaken inv sync to re-scan peer block inventories at height {}", + target_height + ); + inv_state.hint_learned_data = true; + inv_state.hint_do_rescan = true; + inv_state.hint_learned_data_height = target_height; } } diff --git a/stackslib/src/net/mod.rs b/stackslib/src/net/mod.rs index 
cfefa2c5fe..a2461631a6 100644 --- a/stackslib/src/net/mod.rs +++ b/stackslib/src/net/mod.rs @@ -2376,11 +2376,8 @@ pub mod test { if self.closed { return Ok(0); } - match self.read_error { - Some(ref e) => { - return Err(io::Error::from((*e).clone())); - } - None => {} + if let Some(ref e) = self.read_error { + return Err(io::Error::from((*e).clone())); } let sz = self.c.read(buf)?; @@ -2403,11 +2400,8 @@ pub mod test { if self.closed { return Err(io::Error::from(ErrorKind::Other)); // EBADF } - match self.write_error { - Some(ref e) => { - return Err(io::Error::from((*e).clone())); - } - None => {} + if let Some(ref e) = self.write_error { + return Err(io::Error::from((*e).clone())); } self.c.write(buf) } @@ -2799,11 +2793,8 @@ pub mod test { pub fn make_test_path(config: &TestPeerConfig) -> String { let test_path = TestPeer::test_path(&config); - match fs::metadata(&test_path) { - Ok(_) => { - fs::remove_dir_all(&test_path).unwrap(); - } - Err(_) => {} + if let Ok(_) = fs::metadata(&test_path) { + fs::remove_dir_all(&test_path).unwrap(); }; fs::create_dir_all(&test_path).unwrap(); @@ -3559,11 +3550,8 @@ pub mod test { ch: &ConsensusHash, ) { for op in blockstack_ops.iter_mut() { - match op { - BlockstackOperationType::LeaderKeyRegister(ref mut data) => { - data.consensus_hash = (*ch).clone(); - } - _ => {} + if let BlockstackOperationType::LeaderKeyRegister(ref mut data) = op { + data.consensus_hash = (*ch).clone(); } } } diff --git a/stackslib/src/net/neighbors/mod.rs b/stackslib/src/net/neighbors/mod.rs index cc3fd73db8..f0d3cf18b7 100644 --- a/stackslib/src/net/neighbors/mod.rs +++ b/stackslib/src/net/neighbors/mod.rs @@ -388,11 +388,8 @@ impl PeerNetwork { inbound.join(", ") ); - match PeerDB::get_frontier_size(self.peerdb.conn()) { - Ok(count) => { - debug!("{:?}: Frontier table size: {}", &self.local_peer, count); - } - Err(_) => {} + if let Ok(count) = PeerDB::get_frontier_size(self.peerdb.conn()) { + debug!("{:?}: Frontier table size: {}", 
&self.local_peer, count); }; debug!("{:?}: Walk finished ===================", &self.local_peer); } diff --git a/stackslib/src/net/p2p.rs b/stackslib/src/net/p2p.rs index 78c8982106..3180c3a3dd 100644 --- a/stackslib/src/net/p2p.rs +++ b/stackslib/src/net/p2p.rs @@ -1145,13 +1145,10 @@ impl PeerNetwork { ) -> u64 { let mut ret = 0; for (_, socket) in sockets.iter() { - match socket.peer_addr() { - Ok(addr) => { - if addr.ip() == ipaddr.ip() { - ret += 1; - } + if let Ok(addr) = socket.peer_addr() { + if addr.ip() == ipaddr.ip() { + ret += 1; } - Err(_) => {} }; } ret @@ -1378,12 +1375,9 @@ impl PeerNetwork { NetworkRequest::Ban(neighbor_keys) => { for neighbor_key in neighbor_keys.iter() { info!("Request to ban {:?}", neighbor_key); - match self.events.get(neighbor_key) { - Some(event_id) => { - debug!("Will ban {:?} (event {})", neighbor_key, event_id); - self.bans.insert(*event_id); - } - None => {} + if let Some(event_id) = self.events.get(neighbor_key) { + debug!("Will ban {:?} (event {})", neighbor_key, event_id); + self.bans.insert(*event_id); } } Ok(()) @@ -1466,28 +1460,25 @@ impl PeerNetwork { // receive all in-bound requests for i in 0..self.handles.len() { - match self.handles.get(i) { - Some(ref handle) => { - loop { - // drain all inbound requests - let inbound_request_res = handle.chan_in.try_recv(); - match inbound_request_res { - Ok(inbound_request) => { - messages.push((i, inbound_request)); - } - Err(TryRecvError::Empty) => { - // nothing to do - break; - } - Err(TryRecvError::Disconnected) => { - // dead; remove - to_remove.push(i); - break; - } + if let Some(ref handle) = self.handles.get(i) { + loop { + // drain all inbound requests + let inbound_request_res = handle.chan_in.try_recv(); + match inbound_request_res { + Ok(inbound_request) => { + messages.push((i, inbound_request)); + } + Err(TryRecvError::Empty) => { + // nothing to do + break; + } + Err(TryRecvError::Disconnected) => { + // dead; remove + to_remove.push(i); + break; } } } - 
None => {} } } @@ -1885,11 +1876,8 @@ impl PeerNetwork { /// Deregister a socket from our p2p network instance. fn deregister_socket(&mut self, event_id: usize, socket: mio_net::TcpStream) { - match self.network { - Some(ref mut network) => { - let _ = network.deregister(event_id, &socket); - } - None => {} + if let Some(ref mut network) = self.network { + let _ = network.deregister(event_id, &socket); } } @@ -1969,11 +1957,8 @@ impl PeerNetwork { /// Deregister and ban a neighbor pub fn deregister_and_ban_neighbor(&mut self, neighbor: &NeighborKey) { debug!("Disconnect from and ban {:?}", neighbor); - match self.events.get(neighbor) { - Some(event_id) => { - self.bans.insert(*event_id); - } - None => {} + if let Some(event_id) = self.events.get(neighbor) { + self.bans.insert(*event_id); } self.relayer_stats.process_neighbor_ban(neighbor); diff --git a/stackslib/src/net/prune.rs b/stackslib/src/net/prune.rs index f178ea719a..c58e1b210a 100644 --- a/stackslib/src/net/prune.rs +++ b/stackslib/src/net/prune.rs @@ -322,18 +322,15 @@ impl PeerNetwork { if preserve.contains(event_id) { continue; } - match self.peers.get(&event_id) { - Some(ref convo) => { - if !convo.stats.outbound { - let stats = convo.stats.clone(); - if let Some(entry) = ip_neighbor.get_mut(&nk.addrbytes) { - entry.push((*event_id, nk.clone(), stats)); - } else { - ip_neighbor.insert(nk.addrbytes, vec![(*event_id, nk.clone(), stats)]); - } + if let Some(ref convo) = self.peers.get(&event_id) { + if !convo.stats.outbound { + let stats = convo.stats.clone(); + if let Some(entry) = ip_neighbor.get_mut(&nk.addrbytes) { + entry.push((*event_id, nk.clone(), stats)); + } else { + ip_neighbor.insert(nk.addrbytes, vec![(*event_id, nk.clone(), stats)]); } } - None => {} } } @@ -378,15 +375,12 @@ impl PeerNetwork { let mut outbound: Vec = vec![]; for (nk, event_id) in self.events.iter() { - match self.peers.get(event_id) { - Some(convo) => { - if convo.stats.outbound { - outbound.push(format!("{:?}", &nk)); - } 
else { - inbound.push(format!("{:?}", &nk)); - } + if let Some(convo) = self.peers.get(event_id) { + if convo.stats.outbound { + outbound.push(format!("{:?}", &nk)); + } else { + inbound.push(format!("{:?}", &nk)); } - None => {} } } (inbound, outbound) @@ -464,11 +458,8 @@ impl PeerNetwork { inbound.join(", ") ); - match PeerDB::get_frontier_size(self.peerdb.conn()) { - Ok(count) => { - debug!("{:?}: Frontier size: {}", &self.local_peer, count); - } - Err(_) => {} + if let Ok(count) = PeerDB::get_frontier_size(self.peerdb.conn()) { + debug!("{:?}: Frontier size: {}", &self.local_peer, count); }; } } diff --git a/stackslib/src/net/relay.rs b/stackslib/src/net/relay.rs index 4569585b79..cadfb75f1e 100644 --- a/stackslib/src/net/relay.rs +++ b/stackslib/src/net/relay.rs @@ -1826,52 +1826,49 @@ impl Relayer { &tx.txid(), &ast_rules ); - match tx.payload { - TransactionPayload::SmartContract(ref smart_contract, ref clarity_version_opt) => { - let clarity_version = - clarity_version_opt.unwrap_or(ClarityVersion::default_for_epoch(epoch_id)); - - if ast_rules == ASTRules::PrecheckSize { - let origin = tx.get_origin(); - let issuer_principal = { - let addr = if mainnet { - origin.address_mainnet() - } else { - origin.address_testnet() - }; - addr.to_account_principal() - }; - let issuer_principal = if let PrincipalData::Standard(data) = issuer_principal { - data + if let TransactionPayload::SmartContract(ref smart_contract, ref clarity_version_opt) = + tx.payload + { + let clarity_version = + clarity_version_opt.unwrap_or(ClarityVersion::default_for_epoch(epoch_id)); + + if ast_rules == ASTRules::PrecheckSize { + let origin = tx.get_origin(); + let issuer_principal = { + let addr = if mainnet { + origin.address_mainnet() } else { - // not possible - panic!("Transaction had a contract principal origin"); + origin.address_testnet() }; + addr.to_account_principal() + }; + let issuer_principal = if let PrincipalData::Standard(data) = issuer_principal { + data + } else { + // 
not possible + panic!("Transaction had a contract principal origin"); + }; - let contract_id = QualifiedContractIdentifier::new( - issuer_principal, - smart_contract.name.clone(), - ); - let contract_code_str = smart_contract.code_body.to_string(); - - // make sure that the AST isn't unreasonably big - let ast_res = - ast_check_size(&contract_id, &contract_code_str, clarity_version, epoch_id); - match ast_res { - Ok(_) => {} - Err(parse_error) => match parse_error.err { - ParseErrors::ExpressionStackDepthTooDeep - | ParseErrors::VaryExpressionStackDepthTooDeep => { - // don't include this block - info!("Transaction {} is problematic and will not be included, relayed, or built upon", &tx.txid()); - return Err(Error::ClarityError(parse_error.into())); - } - _ => {} - }, - } + let contract_id = + QualifiedContractIdentifier::new(issuer_principal, smart_contract.name.clone()); + let contract_code_str = smart_contract.code_body.to_string(); + + // make sure that the AST isn't unreasonably big + let ast_res = + ast_check_size(&contract_id, &contract_code_str, clarity_version, epoch_id); + match ast_res { + Ok(_) => {} + Err(parse_error) => match parse_error.err { + ParseErrors::ExpressionStackDepthTooDeep + | ParseErrors::VaryExpressionStackDepthTooDeep => { + // don't include this block + info!("Transaction {} is problematic and will not be included, relayed, or built upon", &tx.txid()); + return Err(Error::ClarityError(parse_error.into())); + } + _ => {} + }, } } - _ => {} } Ok(()) } diff --git a/stackslib/src/net/server.rs b/stackslib/src/net/server.rs index 78f0f6fbb5..2459f64c00 100644 --- a/stackslib/src/net/server.rs +++ b/stackslib/src/net/server.rs @@ -429,56 +429,52 @@ impl HttpPeer { // get incoming bytes and update the state of this conversation. 
let mut convo_dead = false; let recv_res = convo.recv(client_sock); - match recv_res { - Err(e) => { - match e { - net_error::PermanentlyDrained => { - // socket got closed, but we might still have pending unsolicited messages - debug!( - "Remote HTTP peer disconnected event {} (socket {:?})", - event_id, &client_sock - ); - convo_dead = true; - } - net_error::InvalidMessage => { - // got sent bad data. If this was an inbound conversation, send it a HTTP - // 400 and close the socket. - debug!("Got a bad HTTP message on socket {:?}", &client_sock); - match convo.reply_error(StacksHttpResponse::new_empty_error( - &HttpBadRequest::new( - "Received an HTTP message that the node could not decode" - .to_string(), - ), - )) { - Ok(_) => { - // prime the socket - if let Err(e) = HttpPeer::saturate_http_socket(client_sock, convo) { - debug!( - "Failed to flush HTTP 400 to socket {:?}: {:?}", - &client_sock, &e - ); - // convo_dead = true; - } - } - Err(e) => { + if let Err(e) = recv_res { + match e { + net_error::PermanentlyDrained => { + // socket got closed, but we might still have pending unsolicited messages + debug!( + "Remote HTTP peer disconnected event {} (socket {:?})", + event_id, &client_sock + ); + convo_dead = true; + } + net_error::InvalidMessage => { + // got sent bad data. If this was an inbound conversation, send it a HTTP + // 400 and close the socket. 
+ debug!("Got a bad HTTP message on socket {:?}", &client_sock); + match convo.reply_error(StacksHttpResponse::new_empty_error( + &HttpBadRequest::new( + "Received an HTTP message that the node could not decode".to_string(), + ), + )) { + Ok(_) => { + // prime the socket + if let Err(e) = HttpPeer::saturate_http_socket(client_sock, convo) { debug!( - "Failed to reply HTTP 400 to socket {:?}: {:?}", + "Failed to flush HTTP 400 to socket {:?}: {:?}", &client_sock, &e ); - convo_dead = true; + // convo_dead = true; } } + Err(e) => { + debug!( + "Failed to reply HTTP 400 to socket {:?}: {:?}", + &client_sock, &e + ); + convo_dead = true; + } } - _ => { - debug!( - "Failed to receive HTTP data on event {} (socket {:?}): {:?}", - event_id, &client_sock, &e - ); - convo_dead = true; - } + } + _ => { + debug!( + "Failed to receive HTTP data on event {} (socket {:?}): {:?}", + event_id, &client_sock, &e + ); + convo_dead = true; } } - Ok(_) => {} } // react to inbound messages -- do we need to send something out, or fulfill requests @@ -730,11 +726,8 @@ mod test { peer.step().unwrap(); // asked to yield? 
- match http_rx.try_recv() { - Ok(_) => { - break; - } - Err(_) => {} + if let Ok(_) = http_rx.try_recv() { + break; } } diff --git a/stackslib/src/net/tests/convergence.rs b/stackslib/src/net/tests/convergence.rs index be35c4e1f1..a607298d74 100644 --- a/stackslib/src/net/tests/convergence.rs +++ b/stackslib/src/net/tests/convergence.rs @@ -218,7 +218,7 @@ fn test_walk_ring_15_org_biased() { let peers = test_walk_ring(&mut peer_configs); for i in 1..peer_count { - match PeerDB::get_peer( + if let Some(p) = PeerDB::get_peer( peers[i].network.peerdb.conn(), peer_0.addr.network_id, &peer_0.addr.addrbytes, @@ -226,11 +226,8 @@ fn test_walk_ring_15_org_biased() { ) .unwrap() { - Some(p) => { - assert_eq!(p.asn, 1); - assert_eq!(p.org, 1); - } - None => {} + assert_eq!(p.asn, 1); + assert_eq!(p.org, 1); } } @@ -398,7 +395,7 @@ fn test_walk_line_15_org_biased() { let peers = test_walk_line(&mut peer_configs); for i in 1..peer_count { - match PeerDB::get_peer( + if let Some(p) = PeerDB::get_peer( peers[i].network.peerdb.conn(), peer_0.addr.network_id, &peer_0.addr.addrbytes, @@ -406,11 +403,8 @@ fn test_walk_line_15_org_biased() { ) .unwrap() { - Some(p) => { - assert_eq!(p.asn, 1); - assert_eq!(p.org, 1); - } - None => {} + assert_eq!(p.asn, 1); + assert_eq!(p.org, 1); } } @@ -634,7 +628,7 @@ fn test_walk_star_15_org_biased() { let peers = test_walk_star(&mut peer_configs); for i in 1..peer_count { - match PeerDB::get_peer( + if let Some(p) = PeerDB::get_peer( peers[i].network.peerdb.conn(), peer_0.addr.network_id, &peer_0.addr.addrbytes, @@ -642,11 +636,8 @@ fn test_walk_star_15_org_biased() { ) .unwrap() { - Some(p) => { - assert_eq!(p.asn, 1); - assert_eq!(p.org, 1); - } - None => {} + assert_eq!(p.asn, 1); + assert_eq!(p.org, 1); } } @@ -849,14 +840,11 @@ fn dump_peers(peers: &[TestPeer]) { let stats_opt = peers[i] .network .get_neighbor_stats(&peers[j].to_neighbor().addr); - match stats_opt { - Some(stats) => { - neighbor_index.push(j); - if stats.outbound { - 
outbound_neighbor_index.push(j); - } + if let Some(stats) = stats_opt { + neighbor_index.push(j); + if stats.outbound { + outbound_neighbor_index.push(j); } - None => {} } } @@ -882,16 +870,13 @@ fn dump_peer_histograms(peers: &[TestPeer]) { let stats_opt = peers[i] .network .get_neighbor_stats(&peers[j].to_neighbor().addr); - match stats_opt { - Some(stats) => { - neighbor_index.push(j); - if stats.outbound { - outbound_neighbor_index.push(j); - } else { - inbound_neighbor_index.push(j); - } + if let Some(stats) = stats_opt { + neighbor_index.push(j); + if stats.outbound { + outbound_neighbor_index.push(j); + } else { + inbound_neighbor_index.push(j); } - None => {} } } for inbound in inbound_neighbor_index.iter() { @@ -1001,32 +986,26 @@ fn run_topology_test_ex( debug!("Step peer {:?}", &nk); // allowed peers are still connected - match initial_allowed.get(&nk) { - Some(ref peer_list) => { - for pnk in peer_list.iter() { - if !peers[i].network.events.contains_key(&pnk.clone()) { - error!( - "{:?}: Perma-allowed peer {:?} not connected anymore", - &nk, &pnk - ); - assert!(false); - } + if let Some(ref peer_list) = initial_allowed.get(&nk) { + for pnk in peer_list.iter() { + if !peers[i].network.events.contains_key(&pnk.clone()) { + error!( + "{:?}: Perma-allowed peer {:?} not connected anymore", + &nk, &pnk + ); + assert!(false); } } - None => {} }; // denied peers are never connected - match initial_denied.get(&nk) { - Some(ref peer_list) => { - for pnk in peer_list.iter() { - if peers[i].network.events.contains_key(&pnk.clone()) { - error!("{:?}: Perma-denied peer {:?} connected", &nk, &pnk); - assert!(false); - } + if let Some(ref peer_list) = initial_denied.get(&nk) { + for pnk in peer_list.iter() { + if peers[i].network.events.contains_key(&pnk.clone()) { + error!("{:?}: Perma-denied peer {:?} connected", &nk, &pnk); + assert!(false); } } - None => {} }; // all ports are unique in the p2p socket table diff --git a/stackslib/src/net/tests/download/epoch2x.rs 
b/stackslib/src/net/tests/download/epoch2x.rs index 5c13a12a50..e21ce19c35 100644 --- a/stackslib/src/net/tests/download/epoch2x.rs +++ b/stackslib/src/net/tests/download/epoch2x.rs @@ -171,20 +171,14 @@ fn test_get_block_availability() { }; // nothing should break - match peer_1.network.inv_state { - Some(ref inv) => { - assert_eq!(inv.get_broken_peers().len(), 0); - assert_eq!(inv.get_diverged_peers().len(), 0); - } - None => {} + if let Some(ref inv) = peer_1.network.inv_state { + assert_eq!(inv.get_broken_peers().len(), 0); + assert_eq!(inv.get_diverged_peers().len(), 0); } - match peer_2.network.inv_state { - Some(ref inv) => { - assert_eq!(inv.get_broken_peers().len(), 0); - assert_eq!(inv.get_diverged_peers().len(), 0); - } - None => {} + if let Some(ref inv) = peer_2.network.inv_state { + assert_eq!(inv.get_broken_peers().len(), 0); + assert_eq!(inv.get_diverged_peers().len(), 0); } round += 1; @@ -566,12 +560,9 @@ pub fn test_get_blocks_and_microblocks_2_peers_download_plain() { |peer| { // check peer health // nothing should break - match peer.network.block_downloader { - Some(ref dl) => { - assert_eq!(dl.broken_peers.len(), 0); - assert_eq!(dl.dead_peers.len(), 0); - } - None => {} + if let Some(ref dl) = peer.network.block_downloader { + assert_eq!(dl.broken_peers.len(), 0); + assert_eq!(dl.dead_peers.len(), 0); } // no block advertisements (should be disabled) @@ -847,12 +838,9 @@ pub fn test_get_blocks_and_microblocks_2_peers_download_plain_100_blocks() { |peer| { // check peer health // nothing should break - match peer.network.block_downloader { - Some(ref dl) => { - assert_eq!(dl.broken_peers.len(), 0); - assert_eq!(dl.dead_peers.len(), 0); - } - None => {} + if let Some(ref dl) = peer.network.block_downloader { + assert_eq!(dl.broken_peers.len(), 0); + assert_eq!(dl.dead_peers.len(), 0); } // no block advertisements (should be disabled) @@ -938,12 +926,9 @@ pub fn test_get_blocks_and_microblocks_5_peers_star() { |peer| { // check peer health // 
nothing should break - match peer.network.block_downloader { - Some(ref dl) => { - assert_eq!(dl.broken_peers.len(), 0); - assert_eq!(dl.dead_peers.len(), 0); - } - None => {} + if let Some(ref dl) = peer.network.block_downloader { + assert_eq!(dl.broken_peers.len(), 0); + assert_eq!(dl.dead_peers.len(), 0); } true }, @@ -1012,12 +997,9 @@ pub fn test_get_blocks_and_microblocks_5_peers_line() { |peer| { // check peer health // nothing should break - match peer.network.block_downloader { - Some(ref dl) => { - assert_eq!(dl.broken_peers.len(), 0); - assert_eq!(dl.dead_peers.len(), 0); - } - None => {} + if let Some(ref dl) = peer.network.block_downloader { + assert_eq!(dl.broken_peers.len(), 0); + assert_eq!(dl.dead_peers.len(), 0); } true }, @@ -1094,12 +1076,9 @@ pub fn test_get_blocks_and_microblocks_overwhelmed_connections() { |peer| { // check peer health // nothing should break - match peer.network.block_downloader { - Some(ref dl) => { - assert_eq!(dl.broken_peers.len(), 0); - assert_eq!(dl.dead_peers.len(), 0); - } - None => {} + if let Some(ref dl) = peer.network.block_downloader { + assert_eq!(dl.broken_peers.len(), 0); + assert_eq!(dl.dead_peers.len(), 0); } true }, @@ -1173,12 +1152,9 @@ pub fn test_get_blocks_and_microblocks_overwhelmed_sockets() { |peer| { // check peer health // nothing should break - match peer.network.block_downloader { - Some(ref dl) => { - assert_eq!(dl.broken_peers.len(), 0); - assert_eq!(dl.dead_peers.len(), 0); - } - None => {} + if let Some(ref dl) = peer.network.block_downloader { + assert_eq!(dl.broken_peers.len(), 0); + assert_eq!(dl.dead_peers.len(), 0); } true }, @@ -1260,11 +1236,8 @@ pub fn test_get_blocks_and_microblocks_ban_url() { |_| {}, |peer| { let mut blocked = 0; - match peer.network.block_downloader { - Some(ref dl) => { - blocked = dl.blocked_urls.len(); - } - None => {} + if let Some(ref dl) = peer.network.block_downloader { + blocked = dl.blocked_urls.len(); } if blocked >= 1 { // NOTE: this is the success 
criterion @@ -1481,12 +1454,9 @@ pub fn test_get_blocks_and_microblocks_2_peers_download_multiple_microblock_desc |peer| { // check peer health // nothing should break - match peer.network.block_downloader { - Some(ref dl) => { - assert_eq!(dl.broken_peers.len(), 0); - assert_eq!(dl.dead_peers.len(), 0); - } - None => {} + if let Some(ref dl) = peer.network.block_downloader { + assert_eq!(dl.broken_peers.len(), 0); + assert_eq!(dl.dead_peers.len(), 0); } // no block advertisements (should be disabled) diff --git a/stackslib/src/net/tests/inv/epoch2x.rs b/stackslib/src/net/tests/inv/epoch2x.rs index 44a4bf3967..b2136bd1f0 100644 --- a/stackslib/src/net/tests/inv/epoch2x.rs +++ b/stackslib/src/net/tests/inv/epoch2x.rs @@ -1390,22 +1390,16 @@ fn test_sync_inv_2_peers_plain() { }; // nothing should break - match peer_1.network.inv_state { - Some(ref inv) => { - assert_eq!(inv.get_broken_peers().len(), 0); - assert_eq!(inv.get_dead_peers().len(), 0); - assert_eq!(inv.get_diverged_peers().len(), 0); - } - None => {} + if let Some(ref inv) = peer_1.network.inv_state { + assert_eq!(inv.get_broken_peers().len(), 0); + assert_eq!(inv.get_dead_peers().len(), 0); + assert_eq!(inv.get_diverged_peers().len(), 0); } - match peer_2.network.inv_state { - Some(ref inv) => { - assert_eq!(inv.get_broken_peers().len(), 0); - assert_eq!(inv.get_dead_peers().len(), 0); - assert_eq!(inv.get_diverged_peers().len(), 0); - } - None => {} + if let Some(ref inv) = peer_2.network.inv_state { + assert_eq!(inv.get_broken_peers().len(), 0); + assert_eq!(inv.get_dead_peers().len(), 0); + assert_eq!(inv.get_diverged_peers().len(), 0); } round += 1; @@ -1553,46 +1547,38 @@ fn test_sync_inv_2_peers_stale() { None => 0, }; - match peer_1.network.inv_state { - Some(ref inv) => { - info!("Peer 1 stats: {:?}", &inv.block_stats); - assert_eq!(inv.get_broken_peers().len(), 0); - assert_eq!(inv.get_dead_peers().len(), 0); - assert_eq!(inv.get_diverged_peers().len(), 0); - - if let Some(ref peer_2_inv) = 
inv.block_stats.get(&peer_2.to_neighbor().addr) { - if peer_2_inv.inv.num_sortitions - == first_stacks_block_height - - peer_1.config.burnchain.first_block_height - { - for i in 0..first_stacks_block_height { - assert!(!peer_2_inv.inv.has_ith_block(i)); - assert!(!peer_2_inv.inv.has_ith_microblock_stream(i)); - } - peer_2_check = true; + if let Some(ref inv) = peer_1.network.inv_state { + info!("Peer 1 stats: {:?}", &inv.block_stats); + assert_eq!(inv.get_broken_peers().len(), 0); + assert_eq!(inv.get_dead_peers().len(), 0); + assert_eq!(inv.get_diverged_peers().len(), 0); + + if let Some(ref peer_2_inv) = inv.block_stats.get(&peer_2.to_neighbor().addr) { + if peer_2_inv.inv.num_sortitions + == first_stacks_block_height - peer_1.config.burnchain.first_block_height + { + for i in 0..first_stacks_block_height { + assert!(!peer_2_inv.inv.has_ith_block(i)); + assert!(!peer_2_inv.inv.has_ith_microblock_stream(i)); } + peer_2_check = true; } } - None => {} } - match peer_2.network.inv_state { - Some(ref inv) => { - info!("Peer 2 stats: {:?}", &inv.block_stats); - assert_eq!(inv.get_broken_peers().len(), 0); - assert_eq!(inv.get_dead_peers().len(), 0); - assert_eq!(inv.get_diverged_peers().len(), 0); - - if let Some(ref peer_1_inv) = inv.block_stats.get(&peer_1.to_neighbor().addr) { - if peer_1_inv.inv.num_sortitions - == first_stacks_block_height - - peer_1.config.burnchain.first_block_height - { - peer_1_check = true; - } + if let Some(ref inv) = peer_2.network.inv_state { + info!("Peer 2 stats: {:?}", &inv.block_stats); + assert_eq!(inv.get_broken_peers().len(), 0); + assert_eq!(inv.get_dead_peers().len(), 0); + assert_eq!(inv.get_diverged_peers().len(), 0); + + if let Some(ref peer_1_inv) = inv.block_stats.get(&peer_1.to_neighbor().addr) { + if peer_1_inv.inv.num_sortitions + == first_stacks_block_height - peer_1.config.burnchain.first_block_height + { + peer_1_check = true; } } - None => {} } round += 1; @@ -1703,54 +1689,48 @@ fn test_sync_inv_2_peers_unstable() { 
None => 0, }; - match peer_1.network.inv_state { - Some(ref inv) => { - info!("Peer 1 stats: {:?}", &inv.block_stats); - assert_eq!(inv.get_broken_peers().len(), 0); - assert_eq!(inv.get_dead_peers().len(), 0); - assert_eq!(inv.get_diverged_peers().len(), 0); + if let Some(ref inv) = peer_1.network.inv_state { + info!("Peer 1 stats: {:?}", &inv.block_stats); + assert_eq!(inv.get_broken_peers().len(), 0); + assert_eq!(inv.get_dead_peers().len(), 0); + assert_eq!(inv.get_diverged_peers().len(), 0); - if let Some(stats) = inv.get_stats(&peer_2.to_neighbor().addr) { - if stats.target_pox_reward_cycle > 0 { - peer_1_pox_cycle_start = true; - } - if stats.target_block_reward_cycle > 0 { - peer_1_block_cycle_start = true; - } - if stats.target_pox_reward_cycle == 0 && peer_1_pox_cycle_start { - peer_1_pox_cycle = true; - } - if stats.target_block_reward_cycle == 0 && peer_1_block_cycle_start { - peer_1_block_cycle = true; - } + if let Some(stats) = inv.get_stats(&peer_2.to_neighbor().addr) { + if stats.target_pox_reward_cycle > 0 { + peer_1_pox_cycle_start = true; + } + if stats.target_block_reward_cycle > 0 { + peer_1_block_cycle_start = true; + } + if stats.target_pox_reward_cycle == 0 && peer_1_pox_cycle_start { + peer_1_pox_cycle = true; + } + if stats.target_block_reward_cycle == 0 && peer_1_block_cycle_start { + peer_1_block_cycle = true; } } - None => {} } - match peer_2.network.inv_state { - Some(ref inv) => { - info!("Peer 2 stats: {:?}", &inv.block_stats); - assert_eq!(inv.get_broken_peers().len(), 0); - assert_eq!(inv.get_dead_peers().len(), 0); - assert_eq!(inv.get_diverged_peers().len(), 0); + if let Some(ref inv) = peer_2.network.inv_state { + info!("Peer 2 stats: {:?}", &inv.block_stats); + assert_eq!(inv.get_broken_peers().len(), 0); + assert_eq!(inv.get_dead_peers().len(), 0); + assert_eq!(inv.get_diverged_peers().len(), 0); - if let Some(stats) = inv.get_stats(&peer_1.to_neighbor().addr) { - if stats.target_pox_reward_cycle > 0 { - peer_2_pox_cycle_start 
= true; - } - if stats.target_block_reward_cycle > 0 { - peer_2_block_cycle_start = true; - } - if stats.target_pox_reward_cycle == 0 && peer_2_pox_cycle_start { - peer_2_pox_cycle = true; - } - if stats.target_block_reward_cycle == 0 && peer_2_block_cycle_start { - peer_2_block_cycle = true; - } + if let Some(stats) = inv.get_stats(&peer_1.to_neighbor().addr) { + if stats.target_pox_reward_cycle > 0 { + peer_2_pox_cycle_start = true; + } + if stats.target_block_reward_cycle > 0 { + peer_2_block_cycle_start = true; + } + if stats.target_pox_reward_cycle == 0 && peer_2_pox_cycle_start { + peer_2_pox_cycle = true; + } + if stats.target_block_reward_cycle == 0 && peer_2_block_cycle_start { + peer_2_block_cycle = true; } } - None => {} } round += 1; @@ -1917,42 +1897,30 @@ fn test_sync_inv_2_peers_different_pox_vectors() { let _ = peer_2.step(); // peer 1 should see that peer 2 has all blocks for reward cycles 5 through 9 - match peer_1.network.inv_state { - Some(ref inv) => { - inv_1_count = inv.get_inv_num_blocks(&peer_2.to_neighbor().addr); - peer_1_sorts = inv.get_inv_sortitions(&peer_2.to_neighbor().addr); - } - None => {} + if let Some(ref inv) = peer_1.network.inv_state { + inv_1_count = inv.get_inv_num_blocks(&peer_2.to_neighbor().addr); + peer_1_sorts = inv.get_inv_sortitions(&peer_2.to_neighbor().addr); }; // peer 2 should see that peer 1 has all blocks up to where we stopped feeding them to // it - match peer_2.network.inv_state { - Some(ref inv) => { - inv_2_count = inv.get_inv_num_blocks(&peer_1.to_neighbor().addr); - peer_2_sorts = inv.get_inv_sortitions(&peer_1.to_neighbor().addr); - } - None => {} + if let Some(ref inv) = peer_2.network.inv_state { + inv_2_count = inv.get_inv_num_blocks(&peer_1.to_neighbor().addr); + peer_2_sorts = inv.get_inv_sortitions(&peer_1.to_neighbor().addr); }; - match peer_1.network.inv_state { - Some(ref inv) => { - info!("Peer 1 stats: {:?}", &inv.block_stats); - assert_eq!(inv.get_broken_peers().len(), 0); - 
assert_eq!(inv.get_dead_peers().len(), 0); - assert_eq!(inv.get_diverged_peers().len(), 0); - } - None => {} + if let Some(ref inv) = peer_1.network.inv_state { + info!("Peer 1 stats: {:?}", &inv.block_stats); + assert_eq!(inv.get_broken_peers().len(), 0); + assert_eq!(inv.get_dead_peers().len(), 0); + assert_eq!(inv.get_diverged_peers().len(), 0); } - match peer_2.network.inv_state { - Some(ref inv) => { - info!("Peer 2 stats: {:?}", &inv.block_stats); - assert_eq!(inv.get_broken_peers().len(), 0); - assert_eq!(inv.get_dead_peers().len(), 0); - assert_eq!(inv.get_diverged_peers().len(), 0); - } - None => {} + if let Some(ref inv) = peer_2.network.inv_state { + info!("Peer 2 stats: {:?}", &inv.block_stats); + assert_eq!(inv.get_broken_peers().len(), 0); + assert_eq!(inv.get_dead_peers().len(), 0); + assert_eq!(inv.get_diverged_peers().len(), 0); } round += 1; diff --git a/stackslib/src/net/tests/inv/nakamoto.rs b/stackslib/src/net/tests/inv/nakamoto.rs index c313ede598..94f1eb8124 100644 --- a/stackslib/src/net/tests/inv/nakamoto.rs +++ b/stackslib/src/net/tests/inv/nakamoto.rs @@ -1089,22 +1089,16 @@ fn test_nakamoto_inv_sync_across_epoch_change() { .unwrap_or(0); // nothing should break - match peer.network.inv_state { - Some(ref inv) => { - assert_eq!(inv.get_broken_peers().len(), 0); - assert_eq!(inv.get_dead_peers().len(), 0); - assert_eq!(inv.get_diverged_peers().len(), 0); - } - None => {} + if let Some(ref inv) = peer.network.inv_state { + assert_eq!(inv.get_broken_peers().len(), 0); + assert_eq!(inv.get_dead_peers().len(), 0); + assert_eq!(inv.get_diverged_peers().len(), 0); } - match other_peer.network.inv_state { - Some(ref inv) => { - assert_eq!(inv.get_broken_peers().len(), 0); - assert_eq!(inv.get_dead_peers().len(), 0); - assert_eq!(inv.get_diverged_peers().len(), 0); - } - None => {} + if let Some(ref inv) = other_peer.network.inv_state { + assert_eq!(inv.get_broken_peers().len(), 0); + assert_eq!(inv.get_dead_peers().len(), 0); + 
assert_eq!(inv.get_diverged_peers().len(), 0); } round += 1; diff --git a/stackslib/src/net/tests/neighbors.rs b/stackslib/src/net/tests/neighbors.rs index f1e3fa76cb..8c56b48b0d 100644 --- a/stackslib/src/net/tests/neighbors.rs +++ b/stackslib/src/net/tests/neighbors.rs @@ -68,20 +68,14 @@ fn test_step_walk_1_neighbor_plain() { walk_2_count ); - match peer_1.network.walk { - Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); - } - None => {} + if let Some(ref w) = peer_1.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + assert_eq!(w.result.replaced_neighbors.len(), 0); }; - match peer_2.network.walk { - Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); - } - None => {} + if let Some(ref w) = peer_2.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + assert_eq!(w.result.replaced_neighbors.len(), 0); }; i += 1; @@ -184,22 +178,16 @@ fn test_step_walk_1_neighbor_plain_no_natpunch() { walk_2_count ); - match peer_1.network.walk { - Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.dead_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); - } - None => {} + if let Some(ref w) = peer_1.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + assert_eq!(w.result.dead_connections.len(), 0); + assert_eq!(w.result.replaced_neighbors.len(), 0); }; - match peer_2.network.walk { - Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.dead_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); - } - None => {} + if let Some(ref w) = peer_2.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + assert_eq!(w.result.dead_connections.len(), 0); + assert_eq!(w.result.replaced_neighbors.len(), 0); }; if let Some(s) = peer_1 @@ -306,20 +294,14 @@ fn 
test_step_walk_1_neighbor_denied() { walk_1_retries = peer_1.network.walk_retries; walk_2_retries = peer_2.network.walk_retries; - match peer_1.network.walk { - Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); - } - None => {} + if let Some(ref w) = peer_1.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + assert_eq!(w.result.replaced_neighbors.len(), 0); }; - match peer_2.network.walk { - Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); - } - None => {} + if let Some(ref w) = peer_2.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + assert_eq!(w.result.replaced_neighbors.len(), 0); }; i += 1; @@ -400,20 +382,14 @@ fn test_step_walk_1_neighbor_bad_epoch() { walk_1_retries = peer_1.network.walk_attempts; walk_2_retries = peer_2.network.walk_attempts; - match peer_1.network.walk { - Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); - } - None => {} + if let Some(ref w) = peer_1.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + assert_eq!(w.result.replaced_neighbors.len(), 0); }; - match peer_2.network.walk { - Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); - } - None => {} + if let Some(ref w) = peer_2.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + assert_eq!(w.result.replaced_neighbors.len(), 0); }; i += 1; @@ -463,20 +439,14 @@ fn test_step_walk_1_neighbor_heartbeat_ping() { walk_2_count ); - match peer_1.network.walk { - Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); - } - None => {} + if let Some(ref w) = peer_1.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + assert_eq!(w.result.replaced_neighbors.len(), 0); }; 
- match peer_2.network.walk { - Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); - } - None => {} + if let Some(ref w) = peer_2.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + assert_eq!(w.result.replaced_neighbors.len(), 0); }; i += 1; @@ -573,23 +543,17 @@ fn test_step_walk_1_neighbor_bootstrapping() { walk_2_count ); - match peer_1.network.walk { - Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); + if let Some(ref w) = peer_1.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + assert_eq!(w.result.replaced_neighbors.len(), 0); - // peer 2 never gets added to peer 1's frontier - assert!(!w.frontier.contains_key(&neighbor_2.addr)); - } - None => {} + // peer 2 never gets added to peer 1's frontier + assert!(!w.frontier.contains_key(&neighbor_2.addr)); }; - match peer_2.network.walk { - Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); - } - None => {} + if let Some(ref w) = peer_2.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + assert_eq!(w.result.replaced_neighbors.len(), 0); }; i += 1; @@ -657,23 +621,17 @@ fn test_step_walk_1_neighbor_behind() { walk_2_count ); - match peer_1.network.walk { - Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); - } - None => {} + if let Some(ref w) = peer_1.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + assert_eq!(w.result.replaced_neighbors.len(), 0); }; - match peer_2.network.walk { - Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); + if let Some(ref w) = peer_2.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + assert_eq!(w.result.replaced_neighbors.len(), 0); - // peer 1 never 
gets added to peer 2's frontier - assert!(!w.frontier.contains_key(&neighbor_1.addr)); - } - None => {} + // peer 1 never gets added to peer 2's frontier + assert!(!w.frontier.contains_key(&neighbor_1.addr)); }; i += 1; @@ -789,20 +747,14 @@ fn test_step_walk_10_neighbors_of_neighbor_plain() { walk_2_count ); - match peer_1.network.walk { - Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); - } - None => {} + if let Some(ref w) = peer_1.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + assert_eq!(w.result.replaced_neighbors.len(), 0); }; - match peer_2.network.walk { - Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); - } - None => {} + if let Some(ref w) = peer_2.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + assert_eq!(w.result.replaced_neighbors.len(), 0); }; i += 1; @@ -942,20 +894,14 @@ fn test_step_walk_10_neighbors_of_neighbor_bootstrapping() { walk_2_count ); - match peer_1.network.walk { - Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); - } - None => {} + if let Some(ref w) = peer_1.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + assert_eq!(w.result.replaced_neighbors.len(), 0); }; - match peer_2.network.walk { - Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); - } - None => {} + if let Some(ref w) = peer_2.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + assert_eq!(w.result.replaced_neighbors.len(), 0); }; steps += 1; @@ -1091,20 +1037,14 @@ fn test_step_walk_2_neighbors_plain() { walk_2_count ); - match peer_1.network.walk { - Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); - } - None => {} + if let Some(ref w) = 
peer_1.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + assert_eq!(w.result.replaced_neighbors.len(), 0); }; - match peer_2.network.walk { - Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); - } - None => {} + if let Some(ref w) = peer_2.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + assert_eq!(w.result.replaced_neighbors.len(), 0); }; i += 1; @@ -1371,28 +1311,19 @@ fn test_step_walk_3_neighbors_inbound() { ); test_debug!("========"); - match peer_1.network.walk { - Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); - } - None => {} + if let Some(ref w) = peer_1.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + assert_eq!(w.result.replaced_neighbors.len(), 0); }; - match peer_2.network.walk { - Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); - } - None => {} + if let Some(ref w) = peer_2.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + assert_eq!(w.result.replaced_neighbors.len(), 0); }; - match peer_3.network.walk { - Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); - } - None => {} + if let Some(ref w) = peer_3.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + assert_eq!(w.result.replaced_neighbors.len(), 0); }; for (i, peer) in [&peer_1, &peer_2, &peer_3].iter().enumerate() { @@ -1542,20 +1473,14 @@ fn test_step_walk_2_neighbors_rekey() { let _ = peer_1.step(); let _ = peer_2.step(); - match peer_1.network.walk { - Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); - } - None => {} + if let Some(ref w) = peer_1.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + 
assert_eq!(w.result.replaced_neighbors.len(), 0); }; - match peer_2.network.walk { - Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); - } - None => {} + if let Some(ref w) = peer_2.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + assert_eq!(w.result.replaced_neighbors.len(), 0); }; } @@ -1649,20 +1574,14 @@ fn test_step_walk_2_neighbors_different_networks() { walk_2_count ); - match peer_1.network.walk { - Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); - } - None => {} + if let Some(ref w) = peer_1.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + assert_eq!(w.result.replaced_neighbors.len(), 0); }; - match peer_2.network.walk { - Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); - } - None => {} + if let Some(ref w) = peer_2.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + assert_eq!(w.result.replaced_neighbors.len(), 0); }; i += 1; diff --git a/stackslib/src/net/tests/relay/epoch2x.rs b/stackslib/src/net/tests/relay/epoch2x.rs index ddf4e92598..8fdbfb846d 100644 --- a/stackslib/src/net/tests/relay/epoch2x.rs +++ b/stackslib/src/net/tests/relay/epoch2x.rs @@ -1702,23 +1702,17 @@ fn test_get_blocks_and_microblocks_2_peers_push_transactions() { let mut peer_0_to_1 = false; let mut peer_1_to_0 = false; for (nk, event_id) in peers[0].network.events.iter() { - match peers[0].network.peers.get(event_id) { - Some(convo) => { - if *nk == peer_1_nk { - peer_0_to_1 = true; - } + if let Some(convo) = peers[0].network.peers.get(event_id) { + if *nk == peer_1_nk { + peer_0_to_1 = true; } - None => {} } } for (nk, event_id) in peers[1].network.events.iter() { - match peers[1].network.peers.get(event_id) { - Some(convo) => { - if *nk == peer_0_nk { - peer_1_to_0 = true; - } + if let Some(convo) = 
peers[1].network.peers.get(event_id) { + if *nk == peer_0_nk { + peer_1_to_0 = true; } - None => {} } } @@ -3732,17 +3726,14 @@ fn test_block_versioned_smart_contract_mempool_rejection_until_v210() { // tenure 28 let versioned_contract = (*versioned_contract_opt.borrow()).clone().unwrap(); let versioned_contract_len = versioned_contract.serialize_to_vec().len(); - match node.chainstate.will_admit_mempool_tx( + if let Err(e) = node.chainstate.will_admit_mempool_tx( &sortdb.index_handle(&tip.sortition_id), &consensus_hash, &stacks_block.block_hash(), &versioned_contract, versioned_contract_len as u64, ) { - Err(e) => { - panic!("will_admit_mempool_tx {:?}", &e); - } - Ok(_) => {} + panic!("will_admit_mempool_tx {:?}", &e); }; peer.sortdb = Some(sortdb); diff --git a/stackslib/src/net/unsolicited.rs b/stackslib/src/net/unsolicited.rs index e7f1c256a4..922332bedd 100644 --- a/stackslib/src/net/unsolicited.rs +++ b/stackslib/src/net/unsolicited.rs @@ -481,21 +481,18 @@ impl PeerNetwork { if need_block { // have the downloader request this block if it's new and we don't have it - match self.block_downloader { - Some(ref mut downloader) => { - downloader.hint_block_sortition_height_available( - block_sortition_height, - ibd, - need_block, - ); + if let Some(ref mut downloader) = self.block_downloader { + downloader.hint_block_sortition_height_available( + block_sortition_height, + ibd, + need_block, + ); - // advance straight to download state if we're in inv state - if self.work_state == PeerNetworkWorkState::BlockInvSync { - debug!("{:?}: advance directly to block download with knowledge of block sortition {}", &self.get_local_peer(), block_sortition_height); - } - self.have_data_to_download = true; + // advance straight to download state if we're in inv state + if self.work_state == PeerNetworkWorkState::BlockInvSync { + debug!("{:?}: advance directly to block download with knowledge of block sortition {}", &self.get_local_peer(), block_sortition_height); } - None => 
{} + self.have_data_to_download = true; } } } diff --git a/stackslib/src/util_lib/mod.rs b/stackslib/src/util_lib/mod.rs index 87031676db..af9a4d98a7 100644 --- a/stackslib/src/util_lib/mod.rs +++ b/stackslib/src/util_lib/mod.rs @@ -32,13 +32,10 @@ pub mod test { let mut done = false; while get_epoch_time_secs() <= deadline { sleep_ms(1000); - match rx.try_recv() { - Ok(success) => { - assert!(success); - done = true; - break; - } - Err(_) => {} + if let Ok(success) = rx.try_recv() { + assert!(success); + done = true; + break; } } From 34e34ef5f5ec489d5a3040bc866c5b49b9c3fc10 Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Thu, 23 Jan 2025 13:56:01 -0500 Subject: [PATCH 21/82] chore: Apply Clippy lint `redundant_pattern_matching` again --- stackslib/src/chainstate/stacks/db/mod.rs | 6 +++--- stackslib/src/chainstate/stacks/index/test/marf.rs | 4 ++-- stackslib/src/chainstate/stacks/tests/mod.rs | 4 ++-- stackslib/src/net/chat.rs | 2 +- stackslib/src/net/mod.rs | 2 +- stackslib/src/net/server.rs | 2 +- 6 files changed, 10 insertions(+), 10 deletions(-) diff --git a/stackslib/src/chainstate/stacks/db/mod.rs b/stackslib/src/chainstate/stacks/db/mod.rs index 1d7c97b676..5821f47394 100644 --- a/stackslib/src/chainstate/stacks/db/mod.rs +++ b/stackslib/src/chainstate/stacks/db/mod.rs @@ -2746,7 +2746,7 @@ pub mod test { balances: Vec<(StacksAddress, u64)>, ) -> StacksChainState { let path = chainstate_path(test_name); - if let Ok(_) = fs::metadata(&path) { + if fs::metadata(&path).is_ok() { fs::remove_dir_all(&path).unwrap(); }; @@ -2863,7 +2863,7 @@ pub mod test { }; let path = chainstate_path(function_name!()); - if let Ok(_) = fs::metadata(&path) { + if fs::metadata(&path).is_ok() { fs::remove_dir_all(&path).unwrap(); }; @@ -2950,7 +2950,7 @@ pub mod test { }; let path = chainstate_path(function_name!()); - if let Ok(_) = fs::metadata(&path) { + if fs::metadata(&path).is_ok() { fs::remove_dir_all(&path).unwrap(); }; diff --git 
a/stackslib/src/chainstate/stacks/index/test/marf.rs b/stackslib/src/chainstate/stacks/index/test/marf.rs index 7102527ba8..a721b2dce4 100644 --- a/stackslib/src/chainstate/stacks/index/test/marf.rs +++ b/stackslib/src/chainstate/stacks/index/test/marf.rs @@ -1282,7 +1282,7 @@ fn marf_insert_random_10485760_4096_file_storage() { } let path = "/tmp/rust_marf_insert_random_10485760_4096_file_storage".to_string(); - if let Ok(_) = fs::metadata(&path) { + if fs::metadata(&path).is_ok() { fs::remove_dir_all(&path).unwrap(); }; let marf_opts = MARFOpenOpts::default(); @@ -1564,7 +1564,7 @@ fn marf_read_random_1048576_4096_file_storage() { for marf_opts in MARFOpenOpts::all().into_iter() { test_debug!("With {:?}", &marf_opts); let path = "/tmp/rust_marf_insert_random_1048576_4096_file_storage".to_string(); - if let Err(_) = fs::metadata(&path) { + if fs::metadata(&path).is_err() { eprintln!("Run the marf_insert_random_1048576_4096_file_storage test first"); return; }; diff --git a/stackslib/src/chainstate/stacks/tests/mod.rs b/stackslib/src/chainstate/stacks/tests/mod.rs index d119dacd8e..29207dce6a 100644 --- a/stackslib/src/chainstate/stacks/tests/mod.rs +++ b/stackslib/src/chainstate/stacks/tests/mod.rs @@ -338,7 +338,7 @@ impl TestStacksNode { panic!("Tried to fork an unforkable chainstate instance"); } - if let Ok(_) = fs::metadata(&chainstate_path(new_test_name)) { + if fs::metadata(&chainstate_path(new_test_name)).is_ok() { fs::remove_dir_all(&chainstate_path(new_test_name)).unwrap(); } @@ -1418,7 +1418,7 @@ pub fn instantiate_and_exec( post_flight_callback: Option>, ) -> StacksChainState { let path = chainstate_path(test_name); - if let Ok(_) = fs::metadata(&path) { + if fs::metadata(&path).is_ok() { fs::remove_dir_all(&path).unwrap(); }; diff --git a/stackslib/src/net/chat.rs b/stackslib/src/net/chat.rs index 0ce27038cd..8fcc7cdf41 100644 --- a/stackslib/src/net/chat.rs +++ b/stackslib/src/net/chat.rs @@ -3106,7 +3106,7 @@ mod test { services: u16, ) -> (PeerDB, 
SortitionDB, StackerDBs, PoxId, StacksChainState) { let test_path = format!("/tmp/stacks-test-databases-{}", testname); - if let Ok(_) = fs::metadata(&test_path) { + if fs::metadata(&test_path).is_ok() { fs::remove_dir_all(&test_path).unwrap(); }; diff --git a/stackslib/src/net/mod.rs b/stackslib/src/net/mod.rs index a2461631a6..0959d2ff35 100644 --- a/stackslib/src/net/mod.rs +++ b/stackslib/src/net/mod.rs @@ -2793,7 +2793,7 @@ pub mod test { pub fn make_test_path(config: &TestPeerConfig) -> String { let test_path = TestPeer::test_path(&config); - if let Ok(_) = fs::metadata(&test_path) { + if fs::metadata(&test_path).is_ok() { fs::remove_dir_all(&test_path).unwrap(); }; diff --git a/stackslib/src/net/server.rs b/stackslib/src/net/server.rs index 2459f64c00..05d831ca7a 100644 --- a/stackslib/src/net/server.rs +++ b/stackslib/src/net/server.rs @@ -726,7 +726,7 @@ mod test { peer.step().unwrap(); // asked to yield? - if let Ok(_) = http_rx.try_recv() { + if http_rx.try_recv().is_ok() { break; } } From 9971d1a40d5f1accb013565e545633ad5489f589 Mon Sep 17 00:00:00 2001 From: Roberto De Ioris Date: Fri, 24 Jan 2025 08:17:33 +0100 Subject: [PATCH 22/82] added warn!
when timeout --- .../src/nakamoto_node/signer_coordinator.rs | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs index 818f1f6a08..ce136a05a2 100644 --- a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs @@ -359,6 +359,12 @@ impl SignerCoordinator { } if rejections_timer.elapsed() > rejections_timeout { + warn!("Timed out while waiting for responses from signers"; + "elapsed" => rejections_timer.elapsed().as_secs(), + "rejections_timeout" => rejections_timeout.as_secs(), + "rejections" => rejections, + "rejections_threshold" => self.total_weight.saturating_sub(self.weight_threshold) + ); return Err(NakamotoNodeError::SigningCoordinatorFailure( "Gave up while tried reaching the threshold".into(), )); @@ -399,6 +405,12 @@ impl SignerCoordinator { ); return Ok(block_status.gathered_signatures.values().cloned().collect()); } else if rejections_timer.elapsed() > rejections_timeout { + warn!("Timed out while waiting for responses from signers"; + "elapsed" => rejections_timer.elapsed().as_secs(), + "rejections_timeout" => rejections_timeout.as_secs(), + "rejections" => rejections, + "rejections_threshold" => self.total_weight.saturating_sub(self.weight_threshold) + ); return Err(NakamotoNodeError::SigningCoordinatorFailure( "Gave up while tried reaching the threshold".into(), )); From 7e2d60e3f9e6759918e55cb490e60eb9b2e732e8 Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Fri, 24 Jan 2025 12:09:50 -0500 Subject: [PATCH 23/82] chore: Address PR comments from Aaron --- stackslib/src/burnchains/bitcoin/indexer.rs | 21 +++++++++------------ stackslib/src/burnchains/tests/mod.rs | 4 ++-- 2 files changed, 11 insertions(+), 14 deletions(-) diff --git a/stackslib/src/burnchains/bitcoin/indexer.rs b/stackslib/src/burnchains/bitcoin/indexer.rs index 2a19074aef..899c96390c 
100644 --- a/stackslib/src/burnchains/bitcoin/indexer.rs +++ b/stackslib/src/burnchains/bitcoin/indexer.rs @@ -264,34 +264,31 @@ impl BitcoinIndexer { match net::TcpStream::connect((self.config.peer_host.as_str(), self.config.peer_port)) { Ok(s) => { // Disable Nagle algorithm - s.set_nodelay(true).map_err(|_e| { - test_debug!("Failed to set TCP_NODELAY: {:?}", &_e); + s.set_nodelay(true).map_err(|e| { + test_debug!("Failed to set TCP_NODELAY: {e:?}"); btc_error::ConnectionError })?; // set timeout s.set_read_timeout(Some(Duration::from_secs(self.runtime.timeout))) - .map_err(|_e| { - test_debug!("Failed to set TCP read timeout: {:?}", &_e); + .map_err(|e| { + test_debug!("Failed to set TCP read timeout: {e:?}"); btc_error::ConnectionError })?; s.set_write_timeout(Some(Duration::from_secs(self.runtime.timeout))) - .map_err(|_e| { - test_debug!("Failed to set TCP write timeout: {:?}", &_e); + .map_err(|e| { + test_debug!("Failed to set TCP write timeout: {e:?}"); btc_error::ConnectionError })?; - if let Some(s) = self.runtime.sock.take() { - let _ = s.shutdown(Shutdown::Both); + if let Some(s_old) = self.runtime.sock.replace(s) { + let _ = s_old.shutdown(Shutdown::Both); } - - self.runtime.sock = Some(s); Ok(()) } Err(_e) => { - let s = self.runtime.sock.take(); - if let Some(s) = s { + if let Some(s) = self.runtime.sock.take() { let _ = s.shutdown(Shutdown::Both); } Err(btc_error::ConnectionError) diff --git a/stackslib/src/burnchains/tests/mod.rs b/stackslib/src/burnchains/tests/mod.rs index 2f6d3112b7..c716f9f4e3 100644 --- a/stackslib/src/burnchains/tests/mod.rs +++ b/stackslib/src/burnchains/tests/mod.rs @@ -579,8 +579,8 @@ impl TestBurnchainBlock { pub fn patch_from_chain_tip(&mut self, parent_snapshot: &BlockSnapshot) { assert_eq!(parent_snapshot.block_height + 1, self.block_height); - for i in 0..self.txs.len() { - if let BlockstackOperationType::LeaderKeyRegister(ref mut data) = self.txs[i] { + for tx in self.txs.iter_mut() { + if let 
BlockstackOperationType::LeaderKeyRegister(ref mut data) = tx { assert_eq!(data.block_height, self.block_height); data.consensus_hash = parent_snapshot.consensus_hash.clone(); } From 82313d350276c5bc12e54c049a5421c70d8bc82e Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Fri, 24 Jan 2025 15:17:55 -0500 Subject: [PATCH 24/82] chore: Apply PR comment from Aaron --- stackslib/src/chainstate/stacks/tests/mod.rs | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/stackslib/src/chainstate/stacks/tests/mod.rs b/stackslib/src/chainstate/stacks/tests/mod.rs index 77ebc89ff5..80b1d17a62 100644 --- a/stackslib/src/chainstate/stacks/tests/mod.rs +++ b/stackslib/src/chainstate/stacks/tests/mod.rs @@ -521,18 +521,14 @@ impl TestStacksNode { fork_tip: &BlockSnapshot, miner: &TestMiner, ) -> Option { - for commit_op in miner.block_commits.iter().rev() { - if let Some(sn) = SortitionDB::get_block_snapshot_for_winning_stacks_block( + miner.block_commits.iter().rev().find_map(|commit_op| { + SortitionDB::get_block_snapshot_for_winning_stacks_block( ic, &fork_tip.sortition_id, &commit_op.block_header_hash, ) .unwrap() - { - return Some(sn); - } - } - return None; + }) } pub fn get_miner_balance(clarity_tx: &mut ClarityTx, addr: &StacksAddress) -> u128 { From c9cbd23dea29d065d1a05cc542a90f093af851bb Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Fri, 24 Jan 2025 20:45:44 -0500 Subject: [PATCH 25/82] fix: Undo `_e` => `e` variable rename --- stackslib/src/burnchains/bitcoin/indexer.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/stackslib/src/burnchains/bitcoin/indexer.rs b/stackslib/src/burnchains/bitcoin/indexer.rs index 899c96390c..69ba63c240 100644 --- a/stackslib/src/burnchains/bitcoin/indexer.rs +++ b/stackslib/src/burnchains/bitcoin/indexer.rs @@ -264,21 +264,21 @@ impl BitcoinIndexer { match net::TcpStream::connect((self.config.peer_host.as_str(), self.config.peer_port)) { Ok(s) => { // Disable Nagle algorithm - 
s.set_nodelay(true).map_err(|e| { - test_debug!("Failed to set TCP_NODELAY: {e:?}"); + s.set_nodelay(true).map_err(|_e| { + test_debug!("Failed to set TCP_NODELAY: {_e:?}"); btc_error::ConnectionError })?; // set timeout s.set_read_timeout(Some(Duration::from_secs(self.runtime.timeout))) - .map_err(|e| { - test_debug!("Failed to set TCP read timeout: {e:?}"); + .map_err(|_e| { + test_debug!("Failed to set TCP read timeout: {_e:?}"); btc_error::ConnectionError })?; s.set_write_timeout(Some(Duration::from_secs(self.runtime.timeout))) - .map_err(|e| { - test_debug!("Failed to set TCP write timeout: {e:?}"); + .map_err(|_e| { + test_debug!("Failed to set TCP write timeout: {_e:?}"); btc_error::ConnectionError })?; From e9c1ab8a1611e933f2d66c2f582cdcaed33960ee Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Mon, 27 Jan 2025 09:46:11 -0500 Subject: [PATCH 26/82] chore: Minor refactoring --- stackslib/src/chainstate/stacks/tests/mod.rs | 34 ++++++-------------- 1 file changed, 10 insertions(+), 24 deletions(-) diff --git a/stackslib/src/chainstate/stacks/tests/mod.rs b/stackslib/src/chainstate/stacks/tests/mod.rs index 85b43fb742..5e509ef0ae 100644 --- a/stackslib/src/chainstate/stacks/tests/mod.rs +++ b/stackslib/src/chainstate/stacks/tests/mod.rs @@ -967,22 +967,11 @@ pub fn get_last_microblock_header( miner: &TestMiner, parent_block_opt: Option<&StacksBlock>, ) -> Option { - let last_microblocks_opt = - parent_block_opt.and_then(|block| node.get_microblock_stream(miner, &block.block_hash())); - - let last_microblock_header_opt = match last_microblocks_opt { - Some(last_microblocks) => { - if last_microblocks.is_empty() { - None - } else { - let l = last_microblocks.len() - 1; - Some(last_microblocks[l].header.clone()) - } - } - None => None, - }; - - last_microblock_header_opt + parent_block_opt + .and_then(|block| node.get_microblock_stream(miner, &block.block_hash())) + .as_ref() + .and_then(|mblock_stream| mblock_stream.last()) + .map(|mblock| 
mblock.header.clone()) } pub fn get_all_mining_rewards( @@ -990,17 +979,14 @@ pub fn get_all_mining_rewards( tip: &StacksHeaderInfo, block_height: u64, ) -> Vec> { - let mut ret = vec![]; let mut tx = chainstate.index_tx_begin(); - for i in 0..block_height { - let block_rewards = + (0..block_height) + .map(|i| { StacksChainState::get_scheduled_block_rewards_in_fork_at_height(&mut tx, tip, i) - .unwrap(); - ret.push(block_rewards); - } - - ret + .unwrap() + }) + .collect() } pub fn make_coinbase(miner: &mut TestMiner, burnchain_height: usize) -> StacksTransaction { From 7699bc3ebb56c8f9100b738e7574c08c51195a85 Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Mon, 27 Jan 2025 11:09:45 -0500 Subject: [PATCH 27/82] chore: Apply Clippy lint `manual_inspect` --- stackslib/src/burnchains/bitcoin/address.rs | 10 +-- stackslib/src/burnchains/bitcoin/bits.rs | 21 ++---- stackslib/src/burnchains/bitcoin/indexer.rs | 14 +--- stackslib/src/burnchains/bitcoin/spv.rs | 70 +++++------------- stackslib/src/burnchains/burnchain.rs | 33 ++++----- stackslib/src/chainstate/burn/db/sortdb.rs | 10 +-- .../burn/operations/leader_block_commit.rs | 10 +-- stackslib/src/chainstate/nakamoto/mod.rs | 69 ++++++++---------- stackslib/src/chainstate/nakamoto/shadow.rs | 5 +- stackslib/src/chainstate/stacks/db/blocks.rs | 17 ++--- stackslib/src/chainstate/stacks/index/marf.rs | 60 +++++---------- .../src/chainstate/stacks/index/storage.rs | 12 +-- stackslib/src/chainstate/stacks/index/trie.rs | 9 +-- stackslib/src/config/mod.rs | 3 +- stackslib/src/net/asn.rs | 20 ++--- stackslib/src/net/chat.rs | 32 +++----- .../nakamoto/download_state_machine.rs | 19 ++--- .../download/nakamoto/tenure_downloader.rs | 15 ++-- .../nakamoto/tenure_downloader_set.rs | 7 +- stackslib/src/net/httpcore.rs | 13 ++-- stackslib/src/net/inv/epoch2x.rs | 34 +++------ stackslib/src/net/inv/nakamoto.rs | 41 +++++------ stackslib/src/net/neighbors/comms.rs | 6 +- stackslib/src/net/neighbors/db.rs | 12 +-- 
stackslib/src/net/p2p.rs | 38 +++------- stackslib/src/net/relay.rs | 73 ++++++++++--------- 26 files changed, 245 insertions(+), 408 deletions(-) diff --git a/stackslib/src/burnchains/bitcoin/address.rs b/stackslib/src/burnchains/bitcoin/address.rs index 4cbc1ce80d..335177cd0c 100644 --- a/stackslib/src/burnchains/bitcoin/address.rs +++ b/stackslib/src/burnchains/bitcoin/address.rs @@ -302,9 +302,8 @@ impl SegwitBitcoinAddress { pub fn from_bech32(s: &str) -> Option { let (hrp, quintets, variant) = bech32::decode(s) - .map_err(|e| { - test_debug!("Failed to decode '{}': {:?}", s, &e); - e + .inspect_err(|_e| { + test_debug!("Failed to decode '{s}': {_e:?}"); }) .ok()?; @@ -327,9 +326,8 @@ impl SegwitBitcoinAddress { prog.append(&mut quintets[1..].to_vec()); let bytes = Vec::from_base32(&prog) - .map_err(|e| { - test_debug!("Failed to decode quintets: {:?}", &e); - e + .inspect_err(|_e| { + test_debug!("Failed to decode quintets: {_e:?}"); }) .ok()?; diff --git a/stackslib/src/burnchains/bitcoin/bits.rs b/stackslib/src/burnchains/bitcoin/bits.rs index 884f07a171..2a9745af25 100644 --- a/stackslib/src/burnchains/bitcoin/bits.rs +++ b/stackslib/src/burnchains/bitcoin/bits.rs @@ -112,22 +112,15 @@ impl BitcoinTxInputStructured { Instruction::PushBytes(payload) => payload, _ => { // not pushbytes, so this can't be a multisig script - test_debug!( - "Not a multisig script: Instruction {} is not a PushBytes", - i - ); + test_debug!("Not a multisig script: Instruction {i} is not a PushBytes"); return None; } }; let pubk = BitcoinPublicKey::from_slice(payload) - .map_err(|e| { + .inspect_err(|&e| { // not a public key - warn!( - "Not a multisig script: pushbytes {} is not a public key ({:?})", - i, e - ); - e + warn!("Not a multisig script: pushbytes {i} is not a public key ({e:?})"); }) .ok()?; @@ -169,13 +162,9 @@ impl BitcoinTxInputStructured { for i in 0..pubkey_vecs.len() { let payload = &pubkey_vecs[i]; let pubk = BitcoinPublicKey::from_slice(&payload[..]) - 
.map_err(|e| { + .inspect_err(|&e| { // not a public key - warn!( - "Not a multisig script: item {} is not a public key ({:?})", - i, e - ); - e + warn!("Not a multisig script: item {i} is not a public key ({e:?})"); }) .ok()?; diff --git a/stackslib/src/burnchains/bitcoin/indexer.rs b/stackslib/src/burnchains/bitcoin/indexer.rs index a9ccf8dfc5..c99e382769 100644 --- a/stackslib/src/burnchains/bitcoin/indexer.rs +++ b/stackslib/src/burnchains/bitcoin/indexer.rs @@ -627,12 +627,8 @@ impl BitcoinIndexer { )?; // what's the last header we have from the canonical history? - let canonical_end_block = orig_spv_client.get_headers_height().map_err(|e| { - error!( - "Failed to get the last block from {}", - canonical_headers_path - ); - e + let canonical_end_block = orig_spv_client.get_headers_height().inspect_err(|_e| { + error!("Failed to get the last block from {canonical_headers_path}"); })?; // bootstrap reorg client @@ -694,13 +690,12 @@ impl BitcoinIndexer { let reorg_headers = reorg_spv_client .read_block_headers(start_block, start_block + REORG_BATCH_SIZE) - .map_err(|e| { + .inspect_err(|_e| { error!( "Failed to read reorg Bitcoin headers from {} to {}", start_block, start_block + REORG_BATCH_SIZE ); - e })?; if reorg_headers.is_empty() { @@ -724,13 +719,12 @@ impl BitcoinIndexer { // got reorg headers. 
Find the equivalent headers in our canonical history let canonical_headers = orig_spv_client .read_block_headers(start_block, start_block + REORG_BATCH_SIZE) - .map_err(|e| { + .inspect_err(|_e| { error!( "Failed to read canonical headers from {} to {}", start_block, start_block + REORG_BATCH_SIZE ); - e })?; assert!( diff --git a/stackslib/src/burnchains/bitcoin/spv.rs b/stackslib/src/burnchains/bitcoin/spv.rs index fff8eaa06f..d12b261be9 100644 --- a/stackslib/src/burnchains/bitcoin/spv.rs +++ b/stackslib/src/burnchains/bitcoin/spv.rs @@ -832,10 +832,7 @@ impl SpvClient { // fetching headers in ascending order, so verify that the first item in // `block_headers` connects to a parent in the DB (if it has one) self.insert_block_headers_after(insert_height, block_headers) - .map_err(|e| { - error!("Failed to insert block headers: {:?}", &e); - e - })?; + .inspect_err(|e| error!("Failed to insert block headers: {e:?}"))?; // check work let chain_tip = self.get_headers_height()?; @@ -843,22 +840,15 @@ impl SpvClient { (insert_height.saturating_sub(1)) / BLOCK_DIFFICULTY_CHUNK_SIZE, chain_tip / BLOCK_DIFFICULTY_CHUNK_SIZE + 1, ) - .map_err(|e| { - error!( - "Received headers with bad target, difficulty, or continuity: {:?}", - &e - ); - e + .inspect_err(|e| { + error!("Received headers with bad target, difficulty, or continuity: {e:?}") })?; } else { // fetching headers in descending order, so verify that the last item in // `block_headers` connects to a child in the DB (if it has one) let headers_len = block_headers.len() as u64; self.insert_block_headers_before(insert_height, block_headers) - .map_err(|e| { - error!("Failed to insert block headers: {:?}", &e); - e - })?; + .inspect_err(|e| error!("Failed to insert block headers: {e:?}"))?; // check work let interval_start = if insert_height % BLOCK_DIFFICULTY_CHUNK_SIZE == 0 { @@ -870,12 +860,8 @@ impl SpvClient { let interval_end = (insert_height + 1 + headers_len) / BLOCK_DIFFICULTY_CHUNK_SIZE + 1; 
self.validate_header_work(interval_start, interval_end) - .map_err(|e| { - error!( - "Received headers with bad target, difficulty, or continuity: {:?}", - &e - ); - e + .inspect_err(|e| { + error!("Received headers with bad target, difficulty, or continuity: {e:?}") })?; } @@ -883,16 +869,12 @@ impl SpvClient { let total_work_after = self.update_chain_work()?; if total_work_after < total_work_before { error!( - "New headers represent less work than the old headers ({} < {})", - total_work_before, total_work_after + "New headers represent less work than the old headers ({total_work_before} < {total_work_after})" ); return Err(btc_error::InvalidChainWork); } - debug!( - "Handled {} Headers: {}-{}", - num_headers, first_header_hash, last_header_hash - ); + debug!("Handled {num_headers} Headers: {first_header_hash}-{last_header_hash}"); } else { debug!("Handled empty header reply"); } @@ -956,22 +938,16 @@ impl SpvClient { ); SpvClient::validate_header_integrity(start_height, &block_headers, self.check_txcount) - .map_err(|e| { - error!("Received invalid headers: {:?}", &e); - e - })?; - - let parent_header = match self.read_block_header(start_height)? { - Some(header) => header, - None => { - warn!( - "No header for block {} -- cannot insert {} headers into {}", - start_height, - block_headers.len(), - self.headers_path - ); - return Err(btc_error::NoncontiguousHeader); - } + .inspect_err(|e| error!("Received invalid headers: {e:?}"))?; + + let Some(parent_header) = self.read_block_header(start_height)? else { + warn!( + "No header for block {} -- cannot insert {} headers into {}", + start_height, + block_headers.len(), + self.headers_path + ); + return Err(btc_error::NoncontiguousHeader); }; // contiguous? 
@@ -1010,10 +986,7 @@ impl SpvClient { ); SpvClient::validate_header_integrity(start_height, &block_headers, self.check_txcount) - .map_err(|e| { - error!("Received invalid headers: {:?}", &e); - e - })?; + .inspect_err(|e| error!("Received invalid headers: {e:?}"))?; match self.read_block_header(end_height)? { Some(child_header) => { @@ -1028,10 +1001,7 @@ impl SpvClient { None => { // if we're inserting headers in reverse order, we're not guaranteed to have the // child. - debug!( - "No header for child block {}, so will not validate continuity", - end_height - ); + debug!("No header for child block {end_height}, so will not validate continuity"); } } diff --git a/stackslib/src/burnchains/burnchain.rs b/stackslib/src/burnchains/burnchain.rs index 18fb27e27e..8bc7289ec2 100644 --- a/stackslib/src/burnchains/burnchain.rs +++ b/stackslib/src/burnchains/burnchain.rs @@ -683,11 +683,12 @@ impl Burnchain { if headers_height == 0 || headers_height < self.first_block_height { debug!("Fetch initial headers"); - indexer.sync_headers(headers_height, None).map_err(|e| { - error!("Failed to sync initial headers"); - sleep_ms(100); - e - })?; + indexer + .sync_headers(headers_height, None) + .inspect_err(|_e| { + error!("Failed to sync initial headers"); + sleep_ms(100); + })?; } Ok(()) } @@ -1137,13 +1138,9 @@ impl Burnchain { let headers_path = indexer.get_headers_path(); // sanity check -- what is the height of our highest header - let headers_height = indexer.get_highest_header_height().map_err(|e| { - error!( - "Failed to read headers height from {}: {:?}", - headers_path, &e - ); - e - })?; + let headers_height = indexer + .get_highest_header_height() + .inspect_err(|e| error!("Failed to read headers height from {headers_path}: {e:?}"))?; if headers_height == 0 { return Ok((0, false)); @@ -1152,16 +1149,12 @@ impl Burnchain { // did we encounter a reorg since last sync? Find the highest common ancestor of the // remote bitcoin peer's chain state. 
// Note that this value is 0-indexed -- the smallest possible value it returns is 0. - let reorg_height = indexer.find_chain_reorg().map_err(|e| { - error!("Failed to check for reorgs from {}: {:?}", headers_path, &e); - e - })?; + let reorg_height = indexer + .find_chain_reorg() + .inspect_err(|e| error!("Failed to check for reorgs from {headers_path}: {e:?}"))?; if reorg_height < headers_height { - warn!( - "Burnchain reorg detected: highest common ancestor at height {}", - reorg_height - ); + warn!("Burnchain reorg detected: highest common ancestor at height {reorg_height}"); return Ok((reorg_height, true)); } else { // no reorg diff --git a/stackslib/src/chainstate/burn/db/sortdb.rs b/stackslib/src/chainstate/burn/db/sortdb.rs index 791ab19006..2fb6c1ca86 100644 --- a/stackslib/src/chainstate/burn/db/sortdb.rs +++ b/stackslib/src/chainstate/burn/db/sortdb.rs @@ -4896,16 +4896,12 @@ impl SortitionDB { let qry = "SELECT * FROM snapshots WHERE sortition_id = ?1"; let args = [&sortition_id]; query_row_panic(conn, qry, &args, || { - format!( - "FATAL: multiple block snapshots for the same block {}", - sortition_id - ) + format!("FATAL: multiple block snapshots for the same block {sortition_id}") }) - .map(|x| { + .inspect(|x| { if x.is_none() { - test_debug!("No snapshot with sortition ID {}", sortition_id); + test_debug!("No snapshot with sortition ID {sortition_id}"); } - x }) } diff --git a/stackslib/src/chainstate/burn/operations/leader_block_commit.rs b/stackslib/src/chainstate/burn/operations/leader_block_commit.rs index f996fd295a..64eff0a94c 100644 --- a/stackslib/src/chainstate/burn/operations/leader_block_commit.rs +++ b/stackslib/src/chainstate/burn/operations/leader_block_commit.rs @@ -1131,19 +1131,17 @@ impl LeaderBlockCommitOp { .is_after_pox_sunset_end(self.block_height, epoch.epoch_id) { // sunset has begun and we're not in epoch 2.1 or later, so apply sunset check - self.check_after_pox_sunset().map_err(|e| { - warn!("Invalid block-commit: bad PoX 
after sunset: {:?}", &e; + self.check_after_pox_sunset().inspect_err(|e| { + warn!("Invalid block-commit: bad PoX after sunset: {e:?}"; "apparent_sender" => %apparent_sender_repr); - e })?; vec![] } else { // either in epoch 2.1, or the PoX sunset hasn't completed yet self.check_pox(epoch.epoch_id, burnchain, tx, reward_set_info) - .map_err(|e| { - warn!("Invalid block-commit: bad PoX: {:?}", &e; + .inspect_err(|e| { + warn!("Invalid block-commit: bad PoX: {e:?}"; "apparent_sender" => %apparent_sender_repr); - e })? }; diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index 57bef8f749..5455e4e360 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -1710,29 +1710,26 @@ impl NakamotoChainState { block_id: &StacksBlockId, ) { loop { - let Ok(staging_block_tx) = stacks_chain_state.staging_db_tx_begin().map_err(|e| { - warn!("Failed to begin staging DB tx: {:?}", &e); - e - }) else { + let Ok(staging_block_tx) = stacks_chain_state + .staging_db_tx_begin() + .inspect_err(|e| warn!("Failed to begin staging DB tx: {e:?}")) + else { sleep_ms(1000); continue; }; - let Ok(_) = staging_block_tx.set_block_processed(block_id).map_err(|e| { - warn!("Failed to mark {} as processed: {:?}", block_id, &e); - e - }) else { + let Ok(_) = staging_block_tx + .set_block_processed(block_id) + .inspect_err(|e| warn!("Failed to mark {block_id} as processed: {e:?}")) + else { sleep_ms(1000); continue; }; - let Ok(_) = staging_block_tx.commit().map_err(|e| { - warn!( - "Failed to commit staging block tx for {}: {:?}", - block_id, &e - ); - e - }) else { + let Ok(_) = staging_block_tx + .commit() + .inspect_err(|e| warn!("Failed to commit staging block tx for {block_id}: {e:?}")) + else { sleep_ms(1000); continue; }; @@ -1748,29 +1745,26 @@ impl NakamotoChainState { block_id: &StacksBlockId, ) { loop { - let Ok(staging_block_tx) = stacks_chain_state.staging_db_tx_begin().map_err(|e| { - 
warn!("Failed to begin staging DB tx: {:?}", &e); - e - }) else { + let Ok(staging_block_tx) = stacks_chain_state + .staging_db_tx_begin() + .inspect_err(|e| warn!("Failed to begin staging DB tx: {e:?}")) + else { sleep_ms(1000); continue; }; - let Ok(_) = staging_block_tx.set_block_orphaned(block_id).map_err(|e| { - warn!("Failed to mark {} as orphaned: {:?}", &block_id, &e); - e - }) else { + let Ok(_) = staging_block_tx + .set_block_orphaned(block_id) + .inspect_err(|e| warn!("Failed to mark {block_id} as orphaned: {e:?}")) + else { sleep_ms(1000); continue; }; - let Ok(_) = staging_block_tx.commit().map_err(|e| { - warn!( - "Failed to commit staging block tx for {}: {:?}", - &block_id, &e - ); - e - }) else { + let Ok(_) = staging_block_tx + .commit() + .inspect_err(|e| warn!("Failed to commit staging block tx for {block_id}: {e:?}")) + else { sleep_ms(1000); continue; }; @@ -2352,12 +2346,11 @@ impl NakamotoChainState { let miner_pubkey_hash160 = leader_key .interpret_nakamoto_signing_key() .ok_or(ChainstateError::NoSuchBlockError) - .map_err(|e| { + .inspect_err(|_e| { warn!( "Leader key did not contain a hash160 of the miner signing public key"; "leader_key" => ?leader_key, ); - e })?; // attaches to burn chain @@ -2959,12 +2952,11 @@ impl NakamotoChainState { warn!("No VRF proof for {}", &parent_sn.consensus_hash); ChainstateError::NoSuchBlockError }) - .map_err(|e| { + .inspect_err(|_e| { warn!("Could not find parent VRF proof"; "tip_block_id" => %tip_block_id, "parent consensus_hash" => %parent_sn.consensus_hash, "block consensus_hash" => %consensus_hash); - e })?; Ok(parent_vrf_proof) @@ -3029,12 +3021,11 @@ impl NakamotoChainState { } let proof = VRFProof::from_hex(&bytes) .ok_or(DBError::Corruption) - .map_err(|e| { + .inspect_err(|_e| { warn!("Failed to load VRF proof: could not decode"; "vrf_proof" => %bytes, "tenure_start_block_id" => %tenure_start_block_id, ); - e })?; Ok(Some(proof)) } else { @@ -3087,25 +3078,23 @@ impl NakamotoChainState { let 
sn = SortitionDB::get_block_snapshot_consensus(sortdb_conn, &block.header.consensus_hash)? .ok_or(ChainstateError::NoSuchBlockError) - .map_err(|e| { + .inspect_err(|_e| { warn!("No block-commit for block"; "consensus_hash" => %block.header.consensus_hash, "stacks_block_hash" => %block.header.block_hash(), "stacks_block_id" => %block.header.block_id() ); - e })?; let block_commit = get_block_commit_by_txid(sortdb_conn, &sn.sortition_id, &sn.winning_block_txid)? .ok_or(ChainstateError::NoSuchBlockError) - .map_err(|e| { + .inspect_err(|_e| { warn!("No block-commit for block"; "consensus_hash" => %block.header.consensus_hash, "stacks_block_hash" => %block.header.block_hash(), "stacks_block_id" => %block.header.block_id() ); - e })?; // N.B. passing block.block_id() here means that we'll look into the parent tenure diff --git a/stackslib/src/chainstate/nakamoto/shadow.rs b/stackslib/src/chainstate/nakamoto/shadow.rs index 67a57a2ca0..46849e5fbb 100644 --- a/stackslib/src/chainstate/nakamoto/shadow.rs +++ b/stackslib/src/chainstate/nakamoto/shadow.rs @@ -347,14 +347,13 @@ impl NakamotoChainState { let vrf_proof = Self::get_block_vrf_proof(chainstate_conn, tip_block_id, &tenure_consensus_hash)? 
.ok_or_else(|| { - warn!("No VRF proof for {}", &tenure_consensus_hash); + warn!("No VRF proof for {tenure_consensus_hash}"); ChainstateError::NoSuchBlockError }) - .map_err(|e| { + .inspect_err(|_e| { warn!("Could not find shadow tenure VRF proof"; "tip_block_id" => %tip_block_id, "shadow consensus_hash" => %tenure_consensus_hash); - e })?; return Ok(Some(vrf_proof)); diff --git a/stackslib/src/chainstate/stacks/db/blocks.rs b/stackslib/src/chainstate/stacks/db/blocks.rs index 295f3708aa..4a5e2443e1 100644 --- a/stackslib/src/chainstate/stacks/db/blocks.rs +++ b/stackslib/src/chainstate/stacks/db/blocks.rs @@ -500,20 +500,19 @@ impl StacksChainState { .open(&path_tmp) .map_err(|e| { if e.kind() == io::ErrorKind::NotFound { - error!("File not found: {:?}", &path_tmp); + error!("File not found: {path_tmp:?}"); Error::DBError(db_error::NotFoundError) } else { - error!("Failed to open {:?}: {:?}", &path_tmp, &e); + error!("Failed to open {path_tmp:?}: {e:?}"); Error::DBError(db_error::IOError(e)) } })?; - writer(&mut fd).map_err(|e| { + writer(&mut fd).inspect_err(|_e| { if delete_on_error { // abort let _ = fs::remove_file(&path_tmp); } - e })?; fd.sync_all() @@ -3983,7 +3982,7 @@ impl StacksChainState { } for (consensus_hash, anchored_block_hash) in to_delete.into_iter() { - info!("Orphan {}/{}: it does not connect to a previously-accepted block, because its consensus hash does not match an existing snapshot on the valid PoX fork.", &consensus_hash, &anchored_block_hash); + info!("Orphan {consensus_hash}/{anchored_block_hash}: it does not connect to a previously-accepted block, because its consensus hash does not match an existing snapshot on the valid PoX fork."); let _ = StacksChainState::set_block_processed( blocks_tx, None, @@ -3992,12 +3991,8 @@ impl StacksChainState { &anchored_block_hash, false, ) - .map_err(|e| { - warn!( - "Failed to orphan {}/{}: {:?}", - &consensus_hash, &anchored_block_hash, &e - ); - e + .inspect_err(|e| { + warn!("Failed to orphan 
{consensus_hash}/{anchored_block_hash}: {e:?}") }); } diff --git a/stackslib/src/chainstate/stacks/index/marf.rs b/stackslib/src/chainstate/stacks/index/marf.rs index 368c21c204..7bec45bdfe 100644 --- a/stackslib/src/chainstate/stacks/index/marf.rs +++ b/stackslib/src/chainstate/stacks/index/marf.rs @@ -440,13 +440,12 @@ impl<'a, T: MarfTrieId> MarfTransaction<'a, T> { if new_extension { self.set_block_heights(chain_tip, next_chain_tip, block_height) - .map_err(|e| { + .inspect_err(|_e| { self.open_chain_tip.take(); - e })?; } - debug!("Opened {} to {}", chain_tip, next_chain_tip); + debug!("Opened {chain_tip} to {next_chain_tip}"); Ok(()) } @@ -932,9 +931,8 @@ impl MARF { let mut cursor = TrieCursor::new(path, storage.root_trieptr()); // walk to insertion point - let mut node = Trie::read_root_nohash(storage).map_err(|e| { - test_debug!("Failed to read root of {:?}: {:?}", block_hash, &e); - e + let mut node = Trie::read_root_nohash(storage).inspect_err(|_e| { + test_debug!("Failed to read root of {block_hash:?}: {_e:?}"); })?; for _ in 0..(cursor.path.len() + 1) { @@ -956,7 +954,7 @@ impl MARF { )); } - trace!("Cursor reached leaf {:?}", &node); + trace!("Cursor reached leaf {node:?}"); storage.bench_mut().marf_walk_from_finish(); return Ok((cursor, node)); } @@ -1035,24 +1033,16 @@ impl MARF { block_hash: &T, path: &TrieHash, ) -> Result, Error> { - trace!("MARF::get_path({:?}) {:?}", block_hash, path); + trace!("MARF::get_path({block_hash:?}) {path:?}"); // a NotFoundError _here_ means that a block didn't exist - storage.open_block(block_hash).map_err(|e| { - test_debug!("Failed to open block {:?}: {:?}", block_hash, &e); - e + storage.open_block(block_hash).inspect_err(|_e| { + test_debug!("Failed to open block {block_hash:?}: {_e:?}"); })?; // a NotFoundError _here_ means that the key doesn't exist in this view - let (cursor, node) = MARF::walk(storage, block_hash, path).map_err(|e| { - trace!( - "Failed to look up key {:?} {:?}: {:?}", - &block_hash, - path, 
- &e - ); - e - })?; + let (cursor, node) = MARF::walk(storage, block_hash, path) + .inspect_err(|e| trace!("Failed to look up key {block_hash:?} {path:?}: {e:?}"))?; // both of these get caught by get_by_key and turned into Ok(None) // and a lot of downstream code seems to depend on that behavior, but @@ -1177,13 +1167,9 @@ impl MARF { // restore storage .open_block_maybe_id(&cur_block_hash, cur_block_id) - .map_err(|e| { - warn!( - "Failed to re-open {} {:?}: {:?}", - &cur_block_hash, cur_block_id, &e - ); - warn!("Result of failed path lookup '{}': {:?}", path, &result); - e + .inspect_err(|e| { + warn!("Failed to re-open {cur_block_hash} {cur_block_id:?}: {e:?}"); + warn!("Result of failed path lookup '{path}': {result:?}"); })?; result.map(|option_result| option_result.map(|leaf| leaf.data)) @@ -1208,13 +1194,9 @@ impl MARF { // restore storage .open_block_maybe_id(&cur_block_hash, cur_block_id) - .map_err(|e| { - warn!( - "Failed to re-open {} {:?}: {:?}", - &cur_block_hash, cur_block_id, &e - ); - warn!("Result of failed key lookup '{}': {:?}", key, &result); - e + .inspect_err(|e| { + warn!("Failed to re-open {cur_block_hash} {cur_block_id:?}: {e:?}"); + warn!("Result of failed key lookup '{key}': {result:?}"); })?; result.map(|option_result| option_result.map(|leaf| leaf.data)) @@ -1237,13 +1219,9 @@ impl MARF { // restore storage .open_block_maybe_id(&cur_block_hash, cur_block_id) - .map_err(|e| { - warn!( - "Failed to re-open {} {:?}: {:?}", - &cur_block_hash, cur_block_id, &e - ); - warn!("Result of failed hash lookup '{}': {:?}", path, &result); - e + .inspect_err(|e| { + warn!("Failed to re-open {cur_block_hash} {cur_block_id:?}: {e:?}"); + warn!("Result of failed hash lookup '{path}': {result:?}"); })?; result.map(|option_result| option_result.map(|leaf| leaf.data)) diff --git a/stackslib/src/chainstate/stacks/index/storage.rs b/stackslib/src/chainstate/stacks/index/storage.rs index 0eb60f25b4..db99f8004e 100644 --- 
a/stackslib/src/chainstate/stacks/index/storage.rs +++ b/stackslib/src/chainstate/stacks/index/storage.rs @@ -892,10 +892,8 @@ impl TrieRAM { let root_disk_ptr = BLOCK_HEADER_HASH_ENCODED_SIZE as u64 + 4; let root_ptr = TriePtr::new(TrieNodeID::Node256 as u8, 0, root_disk_ptr as u32); - let (mut root_node, root_hash) = read_nodetype(f, &root_ptr).map_err(|e| { - error!("Failed to read root node info for {:?}: {:?}", bhh, &e); - e - })?; + let (mut root_node, root_hash) = read_nodetype(f, &root_ptr) + .inspect_err(|e| error!("Failed to read root node info for {bhh:?}: {e:?}"))?; let mut next_index = 1; @@ -922,10 +920,8 @@ impl TrieRAM { let next_ptr = frontier .pop_front() .expect("BUG: no ptr in non-empty frontier"); - let (mut next_node, next_hash) = read_nodetype(f, &next_ptr).map_err(|e| { - error!("Failed to read node at {:?}: {:?}", &next_ptr, &e); - e - })?; + let (mut next_node, next_hash) = read_nodetype(f, &next_ptr) + .inspect_err(|e| error!("Failed to read node at {next_ptr:?}: {e:?}"))?; if !next_node.is_leaf() { // queue children in the same order we stored them diff --git a/stackslib/src/chainstate/stacks/index/trie.rs b/stackslib/src/chainstate/stacks/index/trie.rs index 0603c74c43..e701858fd1 100644 --- a/stackslib/src/chainstate/stacks/index/trie.rs +++ b/stackslib/src/chainstate/stacks/index/trie.rs @@ -217,22 +217,19 @@ impl Trie { // ptr is a backptr -- find the block let back_block_hash = storage .get_block_from_local_id(ptr.back_block()) - .map_err(|e| { + .inspect_err(|_e| { test_debug!("Failed to get block from local ID {}", ptr.back_block()); - e })? 
.clone(); storage .open_block_known_id(&back_block_hash, ptr.back_block()) - .map_err(|e| { + .inspect_err(|_e| { test_debug!( - "Failed to open block {} with id {}: {:?}", + "Failed to open block {} with id {}: {_e:?}", &back_block_hash, ptr.back_block(), - &e ); - e })?; let backptr = ptr.from_backptr(); diff --git a/stackslib/src/config/mod.rs b/stackslib/src/config/mod.rs index f4472d8fbc..ad1e2f6f1d 100644 --- a/stackslib/src/config/mod.rs +++ b/stackslib/src/config/mod.rs @@ -1579,9 +1579,8 @@ impl BurnchainConfigFile { .unwrap_or(default_burnchain_config.fault_injection_burnchain_block_delay), max_unspent_utxos: self .max_unspent_utxos - .map(|val| { + .inspect(|&val| { assert!(val <= 1024, "Value for max_unspent_utxos should be <= 1024"); - val }) .or(default_burnchain_config.max_unspent_utxos), }; diff --git a/stackslib/src/net/asn.rs b/stackslib/src/net/asn.rs index c28e82484b..fb1f66b481 100644 --- a/stackslib/src/net/asn.rs +++ b/stackslib/src/net/asn.rs @@ -122,9 +122,8 @@ impl ASEntry4 { .ok_or(net_error::DeserializeError( "Line does not match ANS4 regex".to_string(), )) - .map_err(|e| { - debug!("Failed to read line \"{}\"", &buf); - e + .inspect_err(|_e| { + debug!("Failed to read line \"{buf}\""); })?; let prefix_octets_str = caps @@ -132,9 +131,8 @@ impl ASEntry4 { .ok_or(net_error::DeserializeError( "Failed to read ANS4 prefix".to_string(), )) - .map_err(|e| { - debug!("Failed to get octets of \"{}\"", &buf); - e + .inspect_err(|_e| { + debug!("Failed to get octets of \"{buf}\""); })? .as_str(); @@ -143,9 +141,8 @@ impl ASEntry4 { .ok_or(net_error::DeserializeError( "Failed to read ASN4 prefix mask".to_string(), )) - .map_err(|e| { - debug!("Failed to get mask of \"{}\"", &buf); - e + .inspect_err(|_e| { + debug!("Failed to get mask of \"{buf}\""); })? 
.as_str(); @@ -154,9 +151,8 @@ impl ASEntry4 { .ok_or(net_error::DeserializeError( "Failed to read ASN ID".to_string(), )) - .map_err(|e| { - debug!("Failed to get ASN of \"{}\"", &buf); - e + .inspect_err(|_e| { + debug!("Failed to get ASN of \"{buf}\""); })? .as_str(); diff --git a/stackslib/src/net/chat.rs b/stackslib/src/net/chat.rs index 740d81b254..ecf533e21b 100644 --- a/stackslib/src/net/chat.rs +++ b/stackslib/src/net/chat.rs @@ -962,10 +962,9 @@ impl ConversationP2P { reply_message, request_preamble.seq, )?; - let reply_handle = self.relay_signed_message(reply).map_err(|e| { - debug!("Unable to reply a {}: {:?}", _msgtype, &e); - e - })?; + let reply_handle = self + .relay_signed_message(reply) + .inspect_err(|e| debug!("Unable to reply a {_msgtype}: {e:?}"))?; Ok(reply_handle) } @@ -981,10 +980,9 @@ impl ConversationP2P { let _msgtype = forward_message.get_message_name().to_owned(); let fwd = self.sign_relay_message(local_peer, burnchain_view, relay_hints, forward_message)?; - let fwd_handle = self.relay_signed_message(fwd).map_err(|e| { - debug!("Unable to forward a {}: {:?}", _msgtype, &e); - e - })?; + let fwd_handle = self + .relay_signed_message(fwd) + .inspect_err(|e| debug!("Unable to forward a {_msgtype}: {e:?}"))?; Ok(fwd_handle) } @@ -1475,13 +1473,9 @@ impl ConversationP2P { neighbors: neighbor_addrs, }); let reply = self.sign_reply(chain_view, &local_peer.private_key, payload, preamble.seq)?; - let reply_handle = self.relay_signed_message(reply).map_err(|e| { - debug!( - "Outbox to {:?} is full; cannot reply to GetNeighbors", - &self - ); - e - })?; + let reply_handle = self + .relay_signed_message(reply) + .inspect_err(|_e| debug!("Outbox to {self:?} is full; cannot reply to GetNeighbors"))?; Ok(reply_handle) } @@ -1747,12 +1741,8 @@ impl ConversationP2P { &network.stacks_tip.block_hash, reward_cycle, )?; - let nakamoto_inv = NakamotoInvData::try_from(&bitvec_bools).map_err(|e| { - warn!( - "Failed to create a NakamotoInv response to {:?}: 
{:?}", - get_nakamoto_inv, &e - ); - e + let nakamoto_inv = NakamotoInvData::try_from(&bitvec_bools).inspect_err(|e| { + warn!("Failed to create a NakamotoInv response to {get_nakamoto_inv:?}: {e:?}") })?; debug!( diff --git a/stackslib/src/net/download/nakamoto/download_state_machine.rs b/stackslib/src/net/download/nakamoto/download_state_machine.rs index 36443b46e4..b5a6af1153 100644 --- a/stackslib/src/net/download/nakamoto/download_state_machine.rs +++ b/stackslib/src/net/download/nakamoto/download_state_machine.rs @@ -1186,12 +1186,11 @@ impl NakamotoDownloadStateMachine { let _ = downloader .try_advance_from_chainstate(chainstate) - .map_err(|e| { + .inspect_err(|e| { warn!( - "Failed to advance downloader in state {} for {}: {:?}", - &downloader.state, &downloader.naddr, &e - ); - e + "Failed to advance downloader in state {} for {}: {e:?}", + &downloader.state, &downloader.naddr + ) }); debug!( @@ -1257,13 +1256,11 @@ impl NakamotoDownloadStateMachine { { if let Some(highest_complete_tenure_downloader) = downloader .make_highest_complete_tenure_downloader() - .map_err(|e| { + .inspect_err(|e| { warn!( - "Failed to make highest complete tenure downloader for {:?}: {:?}", - &downloader.unconfirmed_tenure_id(), - &e - ); - e + "Failed to make highest complete tenure downloader for {:?}: {e:?}", + &downloader.unconfirmed_tenure_id() + ) }) .ok() { diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader.rs b/stackslib/src/net/download/nakamoto/tenure_downloader.rs index 1d4d680c43..6e98703956 100644 --- a/stackslib/src/net/download/nakamoto/tenure_downloader.rs +++ b/stackslib/src/net/download/nakamoto/tenure_downloader.rs @@ -781,9 +781,8 @@ impl NakamotoTenureDownloader { &block_id, get_epoch_time_ms().saturating_sub(start_request_time) ); - let block = response.decode_nakamoto_block().map_err(|e| { - warn!("Failed to decode response for a Nakamoto block: {:?}", &e); - e + let block = response.decode_nakamoto_block().inspect_err(|e| { + 
warn!("Failed to decode response for a Nakamoto block: {e:?}") })?; self.try_accept_tenure_start_block(block)?; Ok(None) @@ -794,9 +793,8 @@ impl NakamotoTenureDownloader { &block_id, get_epoch_time_ms().saturating_sub(start_request_time) ); - let block = response.decode_nakamoto_block().map_err(|e| { - warn!("Failed to decode response for a Nakamoto block: {:?}", &e); - e + let block = response.decode_nakamoto_block().inspect_err(|e| { + warn!("Failed to decode response for a Nakamoto block: {e:?}") })?; self.try_accept_tenure_end_block(&block)?; Ok(None) @@ -807,9 +805,8 @@ impl NakamotoTenureDownloader { &end_block_id, get_epoch_time_ms().saturating_sub(start_request_time) ); - let blocks = response.decode_nakamoto_tenure().map_err(|e| { - warn!("Failed to decode response for a Nakamoto tenure: {:?}", &e); - e + let blocks = response.decode_nakamoto_tenure().inspect_err(|e| { + warn!("Failed to decode response for a Nakamoto tenure: {e:?}") })?; let blocks_opt = self.try_accept_tenure_blocks(blocks)?; Ok(blocks_opt) diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs b/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs index f8054828b6..d73342164e 100644 --- a/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs +++ b/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs @@ -571,12 +571,11 @@ impl NakamotoTenureDownloaderSet { let _ = downloader .try_advance_from_chainstate(chainstate) - .map_err(|e| { + .inspect_err(|e| { warn!( - "Failed to advance downloader in state {} for {}: {:?}", - &downloader.state, &downloader.naddr, &e + "Failed to advance downloader in state {} for {}: {e:?}", + &downloader.state, &downloader.naddr ); - e }); debug!( diff --git a/stackslib/src/net/httpcore.rs b/stackslib/src/net/httpcore.rs index 00fa0948bd..2a47be3547 100644 --- a/stackslib/src/net/httpcore.rs +++ b/stackslib/src/net/httpcore.rs @@ -1275,9 +1275,8 @@ impl StacksHttp { return Err(NetError::InvalidState); } if let 
Some(reply) = self.reply.as_mut() { - match reply.stream.consume_data(fd).map_err(|e| { + match reply.stream.consume_data(fd).inspect_err(|_e| { self.reset(); - e })? { (Some((byte_vec, bytes_total)), sz) => { // done receiving @@ -1491,11 +1490,11 @@ impl ProtocolFamily for StacksHttp { } // message of unknown length. Buffer up and maybe we can parse it. - let (message_bytes_opt, num_read) = - self.consume_data(http_response_preamble, fd).map_err(|e| { - self.reset(); - e - })?; + let (message_bytes_opt, num_read) = self + .consume_data(http_response_preamble, fd) + .inspect_err(|_e| { + self.reset(); + })?; match message_bytes_opt { Some((message_bytes, total_bytes_consumed)) => { diff --git a/stackslib/src/net/inv/epoch2x.rs b/stackslib/src/net/inv/epoch2x.rs index 322b1b826c..99253816f5 100644 --- a/stackslib/src/net/inv/epoch2x.rs +++ b/stackslib/src/net/inv/epoch2x.rs @@ -1847,10 +1847,7 @@ impl PeerNetwork { let message = self.sign_for_neighbor(nk, payload)?; let request = self .send_neighbor_message(nk, message, request_timeout) - .map_err(|e| { - debug!("Failed to send GetPoxInv to {:?}: {:?}", &nk, &e); - e - })?; + .inspect_err(|e| debug!("Failed to send GetPoxInv to {nk:?}: {e:?}"))?; stats.getpoxinv_begin(request, target_pox_reward_cycle); if let Some(event_id) = event_id_opt { @@ -2040,10 +2037,7 @@ impl PeerNetwork { let message = self.sign_for_neighbor(nk, payload)?; let request = self .send_neighbor_message(nk, message, request_timeout) - .map_err(|e| { - debug!("Failed to send GetPoxInv to {:?}: {:?}", &nk, &e); - e - })?; + .inspect_err(|e| debug!("Failed to send GetPoxInv to {nk:?}: {e:?}"))?; stats.getblocksinv_begin(request, target_block_reward_cycle, num_blocks_expected); if let Some(event_id) = event_id_opt { @@ -2605,18 +2599,13 @@ impl PeerNetwork { // if this succeeds, then we should be able to make a BlocksInv let ancestor_sn = self .get_ancestor_sortition_snapshot(sortdb, target_block_height) - .map_err(|e| { - debug!( - "Failed to load 
ancestor sortition snapshot at height {}: {:?}", - target_block_height, &e - ); - e + .inspect_err(|e| { + debug!( "Failed to load ancestor sortition snapshot at height {target_block_height}: {e:?}") })?; - let tip_sn = self.get_tip_sortition_snapshot(sortdb).map_err(|e| { - debug!("Failed to load tip sortition snapshot: {:?}", &e); - e - })?; + let tip_sn = self + .get_tip_sortition_snapshot(sortdb) + .inspect_err(|e| debug!("Failed to load tip sortition snapshot: {e:?}"))?; let getblocksinv = GetBlocksInv { consensus_hash: ancestor_sn.consensus_hash, @@ -2634,12 +2623,11 @@ impl PeerNetwork { let blocks_inv = ConversationP2P::make_getblocksinv_response(self, sortdb, chainstate, &getblocksinv) - .map_err(|e| { + .inspect_err(|e| { debug!( - "Failed to load blocks inventory at reward cycle {} ({:?}): {:?}", - reward_cycle, &ancestor_sn.consensus_hash, &e - ); - e + "Failed to load blocks inventory at reward cycle {reward_cycle} ({:?}): {e:?}", + &ancestor_sn.consensus_hash + ); })?; match blocks_inv { diff --git a/stackslib/src/net/inv/nakamoto.rs b/stackslib/src/net/inv/nakamoto.rs index 74cc8f0d0e..9bebbaf642 100644 --- a/stackslib/src/net/inv/nakamoto.rs +++ b/stackslib/src/net/inv/nakamoto.rs @@ -982,24 +982,22 @@ impl NakamotoInvStateMachine { ); let Some(inv) = self.inventories.get_mut(&naddr) else { debug!( - "{:?}: Got a reply for an untracked inventory peer {}: {:?}", + "{:?}: Got a reply for an untracked inventory peer {naddr}: {reply:?}", network.get_local_peer(), - &naddr, - &reply ); continue; }; - let Ok(inv_learned) = inv.getnakamotoinv_try_finish(network, reply).map_err(|e| { - warn!( - "{:?}: Failed to finish inventory sync to {}: {:?}", - network.get_local_peer(), - &naddr, - &e - ); - self.comms.add_broken(network, &naddr); - e - }) else { + let Ok(inv_learned) = inv + .getnakamotoinv_try_finish(network, reply) + .inspect_err(|e| { + warn!( + "{:?}: Failed to finish inventory sync to {naddr}: {e:?}", + network.get_local_peer() + ); + 
self.comms.add_broken(network, &naddr); + }) + else { continue; }; @@ -1051,14 +1049,15 @@ impl NakamotoInvStateMachine { &e ); } - let Ok((_, learned)) = self.process_getnakamotoinv_finishes(network).map_err(|e| { - warn!( - "{:?}: Failed to finish Nakamoto tenure inventory sync: {:?}", - network.get_local_peer(), - &e - ); - e - }) else { + let Ok((_, learned)) = self + .process_getnakamotoinv_finishes(network) + .inspect_err(|e| { + warn!( + "{:?}: Failed to finish Nakamoto tenure inventory sync: {e:?}", + network.get_local_peer(), + ) + }) + else { self.last_sort_tip = Some(network.burnchain_tip.clone()); return false; }; diff --git a/stackslib/src/net/neighbors/comms.rs b/stackslib/src/net/neighbors/comms.rs index 821952af33..48759c913d 100644 --- a/stackslib/src/net/neighbors/comms.rs +++ b/stackslib/src/net/neighbors/comms.rs @@ -106,14 +106,12 @@ pub trait NeighborComms { let msg = network .sign_for_neighbor(&nk, StacksMessageType::Handshake(handshake_data)) - .map_err(|e| { + .inspect_err(|_e| { info!( - "{:?}: Failed to sign for peer {:?}", + "{:?}: Failed to sign for peer {nk:?}", network.get_local_peer(), - &nk ); self.add_dead(network, &nk); - e })?; network diff --git a/stackslib/src/net/neighbors/db.rs b/stackslib/src/net/neighbors/db.rs index a9d2268fca..3b1d99e906 100644 --- a/stackslib/src/net/neighbors/db.rs +++ b/stackslib/src/net/neighbors/db.rs @@ -223,26 +223,22 @@ pub trait NeighborWalkDB { // favor neighbors with older last-contact times let next_neighbors_res = self .get_fresh_random_neighbors(network, (NUM_NEIGHBORS as u64) * 2) - .map_err(|e| { + .inspect_err(|e| { debug!( - "{:?}: Failed to load fresh initial walk neighbors: {:?}", + "{:?}: Failed to load fresh initial walk neighbors: {e:?}", network.get_local_peer(), - &e ); - e }); let db_neighbors = if let Ok(neighbors) = next_neighbors_res { neighbors } else { let any_neighbors = Self::pick_walk_neighbors(network, (NUM_NEIGHBORS as u64) * 2, 0) - .map_err(|e| { + .inspect_err(|e| { 
info!( - "{:?}: Failed to load any initial walk neighbors: {:?}", + "{:?}: Failed to load any initial walk neighbors: {e:?}", network.get_local_peer(), - &e ); - e })?; any_neighbors diff --git a/stackslib/src/net/p2p.rs b/stackslib/src/net/p2p.rs index 54c0428548..40591c6ddc 100644 --- a/stackslib/src/net/p2p.rs +++ b/stackslib/src/net/p2p.rs @@ -2692,22 +2692,16 @@ impl PeerNetwork { &self.local_peer.private_key, StacksMessageType::NatPunchRequest(nonce), ) - .map_err(|e| { - info!("Failed to sign NAT punch request: {:?}", &e); - e - })?; + .inspect_err(|e| info!("Failed to sign NAT punch request: {e:?}"))?; let mut rh = convo .send_signed_request(natpunch_request, self.connection_opts.timeout) - .map_err(|e| { - info!("Failed to send NAT punch request: {:?}", &e); - e - })?; + .inspect_err(|e| info!("Failed to send NAT punch request: {e:?}"))?; - self.saturate_p2p_socket(event_id, &mut rh).map_err(|e| { - info!("Failed to saturate NAT punch socket on event {}", &event_id); - e - })?; + self.saturate_p2p_socket(event_id, &mut rh) + .inspect_err(|_e| { + info!("Failed to saturate NAT punch socket on event {event_id}") + })?; self.public_ip_reply_handle = Some(rh); break; @@ -3669,15 +3663,13 @@ impl PeerNetwork { // always do block download let new_blocks = self .do_network_block_sync_nakamoto(burnchain_height, sortdb, chainstate, ibd) - .map_err(|e| { + .inspect_err(|e| { warn!( - "{:?}: Failed to perform Nakamoto block sync: {:?}", - &self.get_local_peer(), - &e - ); - e + "{:?}: Failed to perform Nakamoto block sync: {e:?}", + &self.get_local_peer() + ) }) - .unwrap_or(HashMap::new()); + .unwrap_or_default(); network_result.consume_nakamoto_blocks(new_blocks); @@ -4407,13 +4399,7 @@ impl PeerNetwork { sortdb, &OnChainRewardSetProvider::new(), ) - .map_err(|e| { - warn!( - "Failed to load reward cycle info for cycle {}: {:?}", - rc, &e - ); - e - }) + .inspect_err(|e| warn!("Failed to load reward cycle info for cycle {rc}: {e:?}")) .unwrap_or(None) else { 
continue; }; diff --git a/stackslib/src/net/relay.rs b/stackslib/src/net/relay.rs index 26c0fed831..6b1995ab65 100644 --- a/stackslib/src/net/relay.rs +++ b/stackslib/src/net/relay.rs @@ -949,14 +949,12 @@ impl Relayer { if chainstate .nakamoto_blocks_db() .has_nakamoto_block_with_index_hash(&block.header.block_id()) - .map_err(|e| { + .inspect_err(|e| { warn!( - "Failed to determine if we have Nakamoto block {}/{}: {:?}", + "Failed to determine if we have Nakamoto block {}/{}: {e:?}", &block.header.consensus_hash, - &block.header.block_hash(), - &e + &block.header.block_hash() ); - e })? { if force_broadcast { @@ -3135,21 +3133,22 @@ impl PeerNetwork { Ok(m) => m, Err(e) => { warn!( - "{:?}: Failed to sign for {:?}: {:?}", - &self.local_peer, recipient, &e + "{:?}: Failed to sign for {recipient:?}: {e:?}", + &self.local_peer ); continue; } }; // absorb errors - let _ = self.relay_signed_message(recipient, message).map_err(|e| { - warn!( - "{:?}: Failed to announce {} entries to {:?}: {:?}", - &self.local_peer, num_blocks, recipient, &e - ); - e - }); + let _ = self + .relay_signed_message(recipient, message) + .inspect_err(|e| { + warn!( + "{:?}: Failed to announce {num_blocks} entries to {recipient:?}: {e:?}", + &self.local_peer + ); + }); } } @@ -3170,26 +3169,27 @@ impl PeerNetwork { Ok(m) => m, Err(e) => { warn!( - "{:?}: Failed to sign for {:?}: {:?}", - &self.local_peer, recipient, &e + "{:?}: Failed to sign for {recipient:?}: {e:?}", + &self.local_peer ); return; } }; debug!( - "{:?}: Push block {}/{} to {:?}", - &self.local_peer, &ch, &blk_hash, recipient + "{:?}: Push block {ch}/{blk_hash} to {recipient:?}", + &self.local_peer ); // absorb errors - let _ = self.relay_signed_message(recipient, message).map_err(|e| { - warn!( - "{:?}: Failed to push block {}/{} to {:?}: {:?}", - &self.local_peer, &ch, &blk_hash, recipient, &e - ); - e - }); + let _ = self + .relay_signed_message(recipient, message) + .inspect_err(|e| { + warn!( + "{:?}: Failed to push block 
{ch}/{blk_hash} to {recipient:?}: {e:?}", + &self.local_peer + ) + }); } /// Try to push a confirmed microblock stream to a peer. @@ -3210,26 +3210,27 @@ impl PeerNetwork { Ok(m) => m, Err(e) => { warn!( - "{:?}: Failed to sign for {:?}: {:?}", - &self.local_peer, recipient, &e + "{:?}: Failed to sign for {recipient:?}: {e:?}", + &self.local_peer ); return; } }; debug!( - "{:?}: Push microblocks for {} to {:?}", - &self.local_peer, &idx_bhh, recipient + "{:?}: Push microblocks for {idx_bhh} to {recipient:?}", + &self.local_peer ); // absorb errors - let _ = self.relay_signed_message(recipient, message).map_err(|e| { - warn!( - "{:?}: Failed to push microblocks for {} to {:?}: {:?}", - &self.local_peer, &idx_bhh, recipient, &e - ); - e - }); + let _ = self + .relay_signed_message(recipient, message) + .inspect_err(|e| { + warn!( + "{:?}: Failed to push microblocks for {idx_bhh} to {recipient:?}: {e:?}", + &self.local_peer + ); + }); } /// Announce blocks that we have to an outbound peer that doesn't have them. From e6b8ca9d10d2aa09ce76f6b520c4f649a1f039e2 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Mon, 27 Jan 2025 13:51:45 -0600 Subject: [PATCH 28/82] fix: #5750 better win detection on restart --- CHANGELOG.md | 7 + .../stacks-node/src/nakamoto_node/relayer.rs | 12 +- .../src/tests/nakamoto_integrations.rs | 220 ++++++++++++++++++ 3 files changed, 237 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index d9631ccf65..cc5010366f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,13 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to the versioning scheme outlined in the [README.md](README.md). 
+## [Unreleased] + +### Fixed + +- Miners who restart their nodes immediately before a winning tenure now correctly detect that + they won the tenure after their nodes restart ([#5750](https://github.com/stacks-network/stacks-core/issues/5750)). + ## [3.1.0.0.4] ### Added diff --git a/testnet/stacks-node/src/nakamoto_node/relayer.rs b/testnet/stacks-node/src/nakamoto_node/relayer.rs index e8b848e748..8e64dd0da6 100644 --- a/testnet/stacks-node/src/nakamoto_node/relayer.rs +++ b/testnet/stacks-node/src/nakamoto_node/relayer.rs @@ -705,8 +705,16 @@ impl RelayerThread { .expect("FATAL: unknown consensus hash"); // always clear this even if this isn't the latest sortition - let cleared = self.last_commits.remove(&sn.winning_block_txid); - let won_sortition = sn.sortition && cleared; + let _cleared = self.last_commits.remove(&sn.winning_block_txid); + let was_winning_pkh = if let (Some(ref winning_pkh), Some(ref my_pkh)) = + (sn.miner_pk_hash, self.get_mining_key_pkh()) + { + winning_pkh == my_pkh + } else { + false + }; + + let won_sortition = sn.sortition && was_winning_pkh; if won_sortition { increment_stx_blocks_mined_counter(); } diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index be3a4213f6..61f7e29397 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -1740,6 +1740,226 @@ fn simple_neon_integration() { run_loop_thread.join().unwrap(); } +#[test] +#[ignore] +/// Test a scenario in which a miner is restarted right before a tenure +/// which they won. The miner, on restart, should begin mining the new tenure. 
+fn restarting_miner() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); + let prom_bind = "127.0.0.1:6000".to_string(); + naka_conf.node.prometheus_bind = Some(prom_bind.clone()); + naka_conf.miner.activated_vrf_key_path = + Some(format!("{}/vrf_key", naka_conf.node.working_dir)); + naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(5); + let sender_sk = Secp256k1PrivateKey::new(); + // setup sender + recipient for a test stx transfer + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 1000; + let send_fee = 100; + naka_conf.add_initial_balance( + PrincipalData::from(sender_addr).to_string(), + send_amt * 2 + send_fee, + ); + let sender_signer_sk = Secp256k1PrivateKey::new(); + let sender_signer_addr = tests::to_addr(&sender_signer_sk); + let mut signers = TestSigners::new(vec![sender_signer_sk]); + naka_conf.add_initial_balance(PrincipalData::from(sender_signer_addr).to_string(), 100000); + let stacker_sk = setup_stacker(&mut naka_conf); + + test_observer::spawn(); + test_observer::register_any(&mut naka_conf); + + let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); + btcd_controller + .start_bitcoind() + .expect("Failed starting bitcoind"); + let mut btc_regtest_controller = BitcoinRegtestController::new(naka_conf.clone(), None); + btc_regtest_controller.bootstrap_chain(201); + + let mut run_loop = boot_nakamoto::BootRunLoop::new(naka_conf.clone()).unwrap(); + let run_loop_stopper = run_loop.get_termination_switch(); + let Counters { + blocks_processed, + naka_submitted_commits: commits_submitted, + naka_proposed_blocks: proposals_submitted, + .. 
+ } = run_loop.counters(); + let coord_channel = run_loop.coordinator_channels(); + + let mut run_loop_2 = boot_nakamoto::BootRunLoop::new(naka_conf.clone()).unwrap(); + let _run_loop_2_stopper = run_loop.get_termination_switch(); + let Counters { + blocks_processed: blocks_processed_2, + naka_submitted_commits: commits_submitted_2, + naka_proposed_blocks: proposals_submitted_2, + .. + } = run_loop_2.counters(); + let coord_channel_2 = run_loop_2.coordinator_channels(); + + let run_loop_thread = thread::spawn(move || run_loop.start(None, 0)); + wait_for_runloop(&blocks_processed); + boot_to_epoch_3( + &naka_conf, + &blocks_processed, + &[stacker_sk], + &[sender_signer_sk], + &mut Some(&mut signers), + &mut btc_regtest_controller, + ); + + info!("Bootstrapped to Epoch-3.0 boundary, starting nakamoto miner"); + + let burnchain = naka_conf.get_burnchain(); + let sortdb = burnchain.open_sortition_db(true).unwrap(); + let (chainstate, _) = StacksChainState::open( + naka_conf.is_mainnet(), + naka_conf.burnchain.chain_id, + &naka_conf.get_chainstate_path_str(), + None, + ) + .unwrap(); + + let block_height_pre_3_0 = + NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) + .unwrap() + .unwrap() + .stacks_block_height; + + info!("Nakamoto miner started..."); + blind_signer_multinode( + &signers, + &[&naka_conf, &naka_conf], + vec![proposals_submitted, proposals_submitted_2], + ); + + wait_for_first_naka_block_commit(60, &commits_submitted); + + // Mine 2 nakamoto tenures + for _i in 0..2 { + next_block_and_mine_commit( + &mut btc_regtest_controller, + 60, + &coord_channel, + &commits_submitted, + ) + .unwrap(); + } + + let last_tip = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) + .unwrap() + .unwrap(); + info!( + "Latest tip"; + "height" => last_tip.stacks_block_height, + "is_nakamoto" => last_tip.anchored_header.as_stacks_nakamoto().is_some(), + ); + + // close the current miner + coord_channel + .lock() + .expect("Mutex 
poisoned") + .stop_chains_coordinator(); + run_loop_stopper.store(false, Ordering::SeqCst); + run_loop_thread.join().unwrap(); + + // mine a bitcoin block -- this should include a winning commit from + // the miner + btc_regtest_controller.build_next_block(1); + + // start it back up + + let _run_loop_thread = thread::spawn(move || run_loop_2.start(None, 0)); + wait_for_runloop(&blocks_processed_2); + + info!(" ================= RESTARTED THE MINER ================="); + + let tip = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) + .unwrap() + .unwrap(); + info!( + "Latest tip"; + "height" => tip.stacks_block_height, + "is_nakamoto" => tip.anchored_header.as_stacks_nakamoto().is_some(), + ); + + wait_for(60, || { + let tip = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) + .unwrap() + .unwrap(); + Ok(tip.stacks_block_height > last_tip.stacks_block_height) + }) + .unwrap_or_else(|e| { + let tip = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) + .unwrap() + .unwrap(); + + error!( + "Failed to get a new block after restart"; + "last_tip_height" => last_tip.stacks_block_height, + "latest_tip" => tip.stacks_block_height, + "error" => &e, + ); + + panic!("{e}") + }); + + // Mine 2 more nakamoto tenures + for _i in 0..2 { + next_block_and_mine_commit( + &mut btc_regtest_controller, + 60, + &coord_channel_2, + &commits_submitted_2, + ) + .unwrap(); + } + + // load the chain tip, and assert that it is a nakamoto block and at least 30 blocks have advanced in epoch 3 + let tip = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) + .unwrap() + .unwrap(); + info!( + "=== Last tip ==="; + "height" => tip.stacks_block_height, + "is_nakamoto" => tip.anchored_header.as_stacks_nakamoto().is_some(), + ); + + assert!(tip.anchored_header.as_stacks_nakamoto().is_some()); + + // Check that we aren't missing burn blocks + let bhh = u64::from(tip.burn_header_height); + // make sure every burn 
block after the nakamoto transition has a mined + // nakamoto block in it. + let missing = test_observer::get_missing_burn_blocks(220..=bhh).unwrap(); + + // This test was flakey because it was sometimes missing burn block 230, which is right at the Nakamoto transition + // So it was possible to miss a burn block during the transition + // But I don't think it matters at this point since the Nakamoto transition has already happened on mainnet + // So just print a warning instead, don't count it as an error + let missing_is_error: Vec<_> = missing + .into_iter() + .filter(|i| match i { + 230 => { + warn!("Missing burn block {i}"); + false + } + _ => true, + }) + .collect(); + + if !missing_is_error.is_empty() { + panic!("Missing the following burn blocks: {missing_is_error:?}"); + } + + check_nakamoto_empty_block_heuristics(); + + assert!(tip.stacks_block_height >= block_height_pre_3_0 + 4); +} + #[test] + #[ignore] + /// This test spins up a nakamoto-neon node. From 6785cf262ff838debb1c42c2eb6173ec07b3b01e Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Mon, 27 Jan 2025 16:02:21 -0600 Subject: [PATCH 29/82] ci: add test to workflow --- .github/workflows/bitcoin-tests.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index 363e02044f..ff0218bdcd 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -169,6 +169,7 @@ jobs: - tests::nakamoto_integrations::v3_blockbyheight_api_endpoint - tests::nakamoto_integrations::mine_invalid_principal_from_consensus_buff - tests::nakamoto_integrations::test_tenure_extend_from_flashblocks + - tests::nakamoto_integrations::restarting_miner # TODO: enable these once v1 signer is supported by a new nakamoto epoch # - tests::signer::v1::dkg # - tests::signer::v1::sign_request_rejected From 3f60bd05a3414566d1c5d02e96a9c20d1bc818f2 Mon Sep 17 00:00:00 2001 From: Dimitris Apostolou Date: Tue, 21 Jan 2025 19:19:38 +0200 Subject:
[PATCH 30/82] deps: fix crate vulnerabilities --- Cargo.lock | 85 +++++++++++++++++++++++++----------------------------- 1 file changed, 39 insertions(+), 46 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 5569bf1f88..51d2299fa7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -606,7 +606,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.58", ] [[package]] @@ -784,16 +784,15 @@ dependencies = [ [[package]] name = "curve25519-dalek" -version = "4.1.2" +version = "4.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a677b8922c94e01bdbb12126b0bc852f00447528dee1782229af9c720c3f348" +checksum = "97fb8b7c4503de7d6ae7b42ab72a5a59857b4c937ec27a3d4539dba95b5ab2be" dependencies = [ "cfg-if 1.0.0", "cpufeatures", "curve25519-dalek-derive", "digest 0.10.7", "fiat-crypto", - "platforms", "rustc_version 0.4.0", "subtle", "zeroize", @@ -807,7 +806,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.58", ] [[package]] @@ -907,7 +906,7 @@ version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4a3daa8e81a3963a60642bcc1f90a670680bd4a77535faa384e9d1c79d620871" dependencies = [ - "curve25519-dalek 4.1.2", + "curve25519-dalek 4.1.3", "ed25519", "rand_core 0.6.4", "serde", @@ -1082,9 +1081,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eac8f7d7865dcb88bd4373ab671c8cf4508703796caa2b1985a9ca867b3fcb78" +checksum = "2dff15bf788c671c1934e366d07e30c1814a8ef514e1af724a602e8a2fbe1b10" dependencies = [ "futures-core", "futures-sink", @@ -1092,9 +1091,9 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"dfc6580bb841c5a68e9ef15c77ccc837b40a7504914d52e47b8b0e9bbda25a1d" +checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e" [[package]] name = "futures-executor" @@ -1109,9 +1108,9 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a44623e20b9681a318efdd71c299b6b222ed6f231972bfe2f224ebad6311f0c1" +checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6" [[package]] name = "futures-lite" @@ -1143,26 +1142,26 @@ dependencies = [ [[package]] name = "futures-macro" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" +checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.58", ] [[package]] name = "futures-sink" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fb8e00e87438d937621c1c6269e53f536c14d3fbd6a042bb24879e57d474fb5" +checksum = "e575fab7d1e0dcb8d0c7bcf9a63ee213816ab51902e6d244a95819acacf1d4f7" [[package]] name = "futures-task" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38d84fa142264698cdce1a9f9172cf383a0c82de1bddcf3092901442c4097004" +checksum = "f90f7dce0722e95104fcb095585910c0977252f286e354b5e3bd38902cd99988" [[package]] name = "futures-timer" @@ -1172,9 +1171,9 @@ checksum = "e64b03909df88034c26dc1547e8970b91f98bdb65165d6a4e9110d94263dbb2c" [[package]] name = "futures-util" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d6401deb83407ab3da39eba7e33987a73c3df0c82b4bb5813ee871c19c41d48" +checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81" 
dependencies = [ "futures-channel", "futures-core", @@ -1259,9 +1258,9 @@ dependencies = [ [[package]] name = "h2" -version = "0.3.24" +version = "0.3.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb2c4422095b67ee78da96fbb51a4cc413b3b25883c7717ff7ca1ab31022c9c9" +checksum = "81fe527a889e1532da5c525686d96d4c2e74cdd345badf8dfef9f6b39dd5f5e8" dependencies = [ "bytes", "fnv", @@ -1486,14 +1485,14 @@ dependencies = [ "futures-channel", "futures-core", "futures-util", - "h2 0.3.24", + "h2 0.3.26", "http 0.2.11", "http-body 0.4.6", "httparse", "httpdate", "itoa", "pin-project-lite", - "socket2 0.5.5", + "socket2 0.4.10", "tokio", "tower-service", "tracing", @@ -1881,9 +1880,9 @@ dependencies = [ [[package]] name = "mio" -version = "0.8.10" +version = "0.8.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f3d0b296e374a4e6f3c7b0a1f5a51d748a0d34c85e7dc48fc3fa9a87657fe09" +checksum = "a4a650543ca06a924e8b371db273b2756685faae30f8487da1b56505a8f78b0c" dependencies = [ "libc", "wasi 0.11.0+wasi-snapshot-preview1", @@ -2103,7 +2102,7 @@ checksum = "266c042b60c9c76b8d53061e52b2e0d1116abc57cefc8c5cd671619a56ac3690" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.58", ] [[package]] @@ -2145,12 +2144,6 @@ version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d231b230927b5e4ad203db57bbcbee2802f6bce620b1e4a9024a07d94e2907ec" -[[package]] -name = "platforms" -version = "3.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "626dec3cac7cc0e1577a2ec3fc496277ec2baa084bebad95bb6fdbfae235f84c" - [[package]] name = "polling" version = "2.8.0" @@ -2453,7 +2446,7 @@ dependencies = [ "encoding_rs", "futures-core", "futures-util", - "h2 0.3.24", + "h2 0.3.26", "http 0.2.11", "http-body 0.4.6", "hyper 0.14.28", @@ -2627,9 +2620,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.21.10" +version = "0.21.12" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9d5a6813c0759e4609cd494e8e725babae6a2ca7b62a5536a13daaec6fcb7ba" +checksum = "3f56a14d1f48b391359b22f731fd4bd7e43c97f3c50eee276f3aa09c94784d3e" dependencies = [ "log", "ring", @@ -2762,7 +2755,7 @@ checksum = "33c85360c95e7d137454dc81d9a4ed2b8efd8fbe19cee57357b32b9771fccb67" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.58", ] [[package]] @@ -3288,9 +3281,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.48" +version = "2.0.58" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f3531638e407dfc0814761abb7c00a5b54992b849452a0646b7f65c9f770f3f" +checksum = "44cfb93f38070beee36b3fef7d4f5a16f27751d94b187b666a5cc5e9b0d30687" dependencies = [ "proc-macro2", "quote", @@ -3365,7 +3358,7 @@ checksum = "ae71770322cbd277e69d762a16c444af02aa0575ac0d174f0b9562d3b37f8602" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.58", ] [[package]] @@ -3505,7 +3498,7 @@ dependencies = [ "backtrace", "bytes", "libc", - "mio 0.8.10", + "mio 0.8.11", "num_cpus", "parking_lot", "pin-project-lite", @@ -3595,7 +3588,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.58", ] [[package]] @@ -3841,7 +3834,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.58", "wasm-bindgen-shared", ] @@ -3875,7 +3868,7 @@ checksum = "642f325be6301eb8107a83d12a8ac6c1e1c54345a7ef1a9261962dfefda09e66" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.58", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -4114,7 +4107,7 @@ checksum = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.58", ] [[package]] From cef96ad23f06ee757654c29e85090b8f0f8fc6f0 Mon Sep 17 00:00:00 2001 From: Dimitris Apostolou Date: Tue, 21 Jan 2025 23:02:33 +0200 
Subject: [PATCH 31/82] deps: fix more crate vulnerabilities --- Cargo.lock | 28 +++++++++++++++++++--------- Cargo.toml | 4 ++-- 2 files changed, 21 insertions(+), 11 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 51d2299fa7..e56b89f8d7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -620,7 +620,7 @@ name = "clarity" version = "0.0.1" dependencies = [ "assert-json-diff 1.1.0", - "hashbrown 0.14.3", + "hashbrown 0.15.2", "integer-sqrt", "lazy_static", "mutants", @@ -1039,6 +1039,12 @@ version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" +[[package]] +name = "foldhash" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a0d2fde1f7b3d48b8395d5f2de76c18a528bd6a9cdde438df747bfcba3e05d6f" + [[package]] name = "form_urlencoded" version = "1.2.1" @@ -1301,8 +1307,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "290f1a1d9242c78d09ce40a5e87e7554ee637af1351968159f4952f028f75604" dependencies = [ "ahash", - "allocator-api2", - "serde", ] [[package]] @@ -1310,6 +1314,12 @@ name = "hashbrown" version = "0.15.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bf151400ff0baff5465007dd2f3e717f3fe502074ca563069ce3a6629d07b289" +dependencies = [ + "allocator-api2", + "equivalent", + "foldhash", + "serde", +] [[package]] name = "hashlink" @@ -1732,7 +1742,7 @@ name = "libsigner" version = "0.0.1" dependencies = [ "clarity", - "hashbrown 0.14.3", + "hashbrown 0.15.2", "lazy_static", "libc", "libstackerdb", @@ -2432,7 +2442,7 @@ checksum = "c08c74e62047bb2de4ff487b251e4a92e24f48745648451635cec7d591162d9f" name = "relay-server" version = "0.0.1" dependencies = [ - "hashbrown 0.14.3", + "hashbrown 0.15.2", ] [[package]] @@ -3035,7 +3045,7 @@ dependencies = [ "chrono", "curve25519-dalek 2.0.0", "ed25519-dalek", - "hashbrown 0.14.3", + "hashbrown 0.15.2", "lazy_static", 
"libc", "nix", @@ -3066,7 +3076,7 @@ dependencies = [ "base64 0.12.3", "chrono", "clarity", - "hashbrown 0.14.3", + "hashbrown 0.15.2", "http-types", "lazy_static", "libc", @@ -3106,7 +3116,7 @@ dependencies = [ "backoff", "clap", "clarity", - "hashbrown 0.14.3", + "hashbrown 0.15.2", "lazy_static", "libsigner", "libstackerdb", @@ -3143,7 +3153,7 @@ dependencies = [ "clarity", "curve25519-dalek 2.0.0", "ed25519-dalek", - "hashbrown 0.14.3", + "hashbrown 0.15.2", "integer-sqrt", "lazy_static", "libc", diff --git a/Cargo.toml b/Cargo.toml index 194e946ef4..3b9486b61d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -15,8 +15,8 @@ members = [ # Dependencies we want to keep the same between workspace members [workspace.dependencies] ed25519-dalek = { version = "2.1.1", features = ["serde", "rand_core"] } -hashbrown = { version = "0.14.3", features = ["serde"] } -rand_core = "0.6" +hashbrown = { version = "0.15.2", features = ["serde"] } +rand_core = "0.6.4" rand = "0.8" rand_chacha = "0.3.1" tikv-jemallocator = "0.5.4" From 1e8adfb57c27ea56b526b64a43bf52ee5edafc18 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Tue, 28 Jan 2025 08:52:41 -0600 Subject: [PATCH 32/82] chore: add mutants::skip to process_sortition --- testnet/stacks-node/src/nakamoto_node/relayer.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/testnet/stacks-node/src/nakamoto_node/relayer.rs b/testnet/stacks-node/src/nakamoto_node/relayer.rs index 8e64dd0da6..21ec48f195 100644 --- a/testnet/stacks-node/src/nakamoto_node/relayer.rs +++ b/testnet/stacks-node/src/nakamoto_node/relayer.rs @@ -694,6 +694,9 @@ impl RelayerThread { /// this sortition matches the sortition tip and we have a parent to build atop. /// /// Otherwise, returns None, meaning no action will be taken. + // This method is covered by the e2e bitcoind tests, which do not show up + // in mutant coverage. 
+ #[cfg_attr(test, mutants::skip)] fn process_sortition( &mut self, consensus_hash: ConsensusHash, From 5456caa6b97271bdb2645539df9f14cf30d71138 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Tue, 28 Jan 2025 11:19:25 -0600 Subject: [PATCH 33/82] fix introduced typo in changelog --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index d8a57ed393..fde3e2e6d4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -38,7 +38,7 @@ and this project adheres to the versioning scheme outlined in the [README.md](RE ### Changed - The RPC endpoint `/v3/block_proposal` no longer will evaluate block proposals more than `block_proposal_max_age_secs` old -- When a transaction is dropped due to replauce-by-fee, the `/drop_mempool_tx` event observer payload now includes `new_txid`, which is the transaction that replaced this dropped transaction. When a transaction is dropped for other reasons, `new_txid` is `null`. [#5381](https://github.com/stacks-network/stacks-core/pull/5381) +- When a transaction is dropped due to replace-by-fee, the `/drop_mempool_tx` event observer payload now includes `new_txid`, which is the transaction that replaced this dropped transaction. When a transaction is dropped for other reasons, `new_txid` is `null`. 
[#5381](https://github.com/stacks-network/stacks-core/pull/5381) - Nodes will assume that all PoX anchor blocks exist by default, and stall initial block download indefinitely to await their arrival (#5502) ### Fixed From 45ae0153843a91788e7990e164762d22fa163e7e Mon Sep 17 00:00:00 2001 From: Roberto De Ioris Date: Tue, 28 Jan 2025 18:12:15 +0000 Subject: [PATCH 34/82] initial prototype for configurable block_rejction timeout steps --- stackslib/src/config/mod.rs | 27 ++++++++++ .../src/nakamoto_node/signer_coordinator.rs | 50 ++++++++++++------- testnet/stacks-node/src/tests/signer/v0.rs | 6 ++- 3 files changed, 64 insertions(+), 19 deletions(-) diff --git a/stackslib/src/config/mod.rs b/stackslib/src/config/mod.rs index 6d2d5e4389..b26676294f 100644 --- a/stackslib/src/config/mod.rs +++ b/stackslib/src/config/mod.rs @@ -36,6 +36,8 @@ use stacks_common::util::get_epoch_time_ms; use stacks_common::util::hash::hex_bytes; use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; +use std::collections::BTreeMap; + use crate::burnchains::affirmation::AffirmationMap; use crate::burnchains::bitcoin::BitcoinNetworkType; use crate::burnchains::{Burnchain, MagicBytes, PoxConstants, BLOCKSTACK_MAGIC_MAINNET}; @@ -2156,6 +2158,8 @@ pub struct MinerConfig { pub tenure_extend_poll_secs: Duration, /// Duration to wait before attempting to issue a tenure extend pub tenure_timeout: Duration, + /// + pub block_rejection_timeout_steps: BTreeMap, } impl Default for MinerConfig { @@ -2194,6 +2198,14 @@ impl Default for MinerConfig { ), tenure_extend_poll_secs: Duration::from_secs(DEFAULT_TENURE_EXTEND_POLL_SECS), tenure_timeout: Duration::from_secs(DEFAULT_TENURE_TIMEOUT_SECS), + block_rejection_timeout_steps: { + let mut timeouts_btree = BTreeMap::::new(); + timeouts_btree.insert(0, Duration::from_secs(600)); + timeouts_btree.insert(1, Duration::from_secs(300)); + timeouts_btree.insert(2, Duration::from_secs(150)); + timeouts_btree.insert(3, 
Duration::from_secs(0)); + timeouts_btree + }, } } } @@ -2590,6 +2602,7 @@ pub struct MinerConfigFile { pub tenure_cost_limit_per_block_percentage: Option, pub tenure_extend_poll_secs: Option, pub tenure_timeout_secs: Option, + pub block_rejection_timeout_steps: Option>, } impl MinerConfigFile { @@ -2732,6 +2745,20 @@ impl MinerConfigFile { tenure_cost_limit_per_block_percentage, tenure_extend_poll_secs: self.tenure_extend_poll_secs.map(Duration::from_secs).unwrap_or(miner_default_config.tenure_extend_poll_secs), tenure_timeout: self.tenure_timeout_secs.map(Duration::from_secs).unwrap_or(miner_default_config.tenure_timeout), + block_rejection_timeout_steps: { + if let Some(block_rejection_timeout_items) = self.block_rejection_timeout_steps { + let mut timeouts_btree = BTreeMap::::new(); + for (slice, millis) in block_rejection_timeout_items.iter() { + match slice.parse::() { + Ok(slice_slot) => timeouts_btree.insert(slice_slot, Duration::from_millis(*millis)), + Err(e) => panic!("block_rejection_timeout_items keys must be unsigned integers: {}", e) + }; + } + timeouts_btree + } else{ + miner_default_config.block_rejection_timeout_steps + } + } }) } } diff --git a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs index ce136a05a2..686d62a2d2 100644 --- a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs @@ -38,6 +38,8 @@ use stacks::util::secp256k1::MessageSignature; use stacks::util_lib::boot::boot_code_id; #[cfg(test)] use stacks_common::util::tests::TestFlag; +use std::collections::BTreeMap; +use std::ops::Bound::Included; use super::stackerdb_listener::StackerDBListenerComms; use super::Error as NakamotoNodeError; @@ -53,9 +55,6 @@ use crate::Config; pub static BLOCK_REJECTIONS_CURRENT_TIMEOUT: LazyLock> = LazyLock::new(TestFlag::default); -/// Base timeout for rejections heuristic -pub static 
BLOCK_REJECTIONS_TIMEOUT_BASE: u64 = 600; - /// The state of the signer database listener, used by the miner thread to /// interact with the signer listener. pub struct SignerCoordinator { @@ -81,6 +80,8 @@ pub struct SignerCoordinator { /// Rather, this burn block is used to determine whether or not a new /// burn block has arrived since this thread started. burn_tip_at_start: ConsensusHash, + /// + block_rejection_timeout_steps: BTreeMap, } impl SignerCoordinator { @@ -126,6 +127,7 @@ impl SignerCoordinator { keep_running, listener_thread: None, burn_tip_at_start: burn_tip_at_start.clone(), + block_rejection_timeout_steps: config.miner.block_rejection_timeout_steps.clone(), }; // Spawn the signer DB listener thread @@ -311,9 +313,17 @@ impl SignerCoordinator { // this is used to track the start of the waiting cycle let mut rejections_timer = Instant::now(); // the amount of current rejections to eventually modify the timeout - let mut rejections: u32 = 0; + let mut rejections: u64 = 0; // default timeout - let mut rejections_timeout = Duration::from_secs(BLOCK_REJECTIONS_TIMEOUT_BASE); + let mut rejections_timeout = self + .block_rejection_timeout_steps + .range((Included(0), Included(rejections))) + .last() + .ok_or_else(|| { + NakamotoNodeError::SigningCoordinatorFailure( + "Invalid rejection timeout step function definition".into(), + ) + })?; // this is used for comparing block_status to identify if it has been changed from the previous event let mut block_status_tracker = BlockStatus::default(); loop { @@ -324,7 +334,7 @@ impl SignerCoordinator { block_signer_sighash, &mut block_status_tracker, rejections_timer, - rejections_timeout, + *rejections_timeout.1, EVENT_RECEIVER_POLL, )? 
{ Some(status) => status, @@ -358,10 +368,10 @@ impl SignerCoordinator { return Err(NakamotoNodeError::BurnchainTipChanged); } - if rejections_timer.elapsed() > rejections_timeout { + if rejections_timer.elapsed() > *rejections_timeout.1 { warn!("Timed out while waiting for responses from signers"; "elapsed" => rejections_timer.elapsed().as_secs(), - "rejections_timeout" => rejections_timeout.as_secs(), + "rejections_timeout" => rejections_timeout.1.as_secs(), "rejections" => rejections, "rejections_threshold" => self.total_weight.saturating_sub(self.weight_threshold) ); @@ -374,17 +384,21 @@ impl SignerCoordinator { } }; - if rejections != block_status.total_reject_weight { + if rejections != block_status.total_reject_weight as u64 { rejections_timer = Instant::now(); - rejections = block_status.total_reject_weight; - rejections_timeout = Duration::from_secs_f32( - BLOCK_REJECTIONS_TIMEOUT_BASE as f32 - - (BLOCK_REJECTIONS_TIMEOUT_BASE as f32 - * ((rejections as f32 / self.weight_threshold as f32).powf(2.0))), - ); + rejections = block_status.total_reject_weight as u64; + rejections_timeout = self + .block_rejection_timeout_steps + .range((Included(0), Included((rejections as f64 / self.total_weight as f64) as u64))) + .last() + .ok_or_else(|| { + NakamotoNodeError::SigningCoordinatorFailure( + "Invalid rejection timeout step function definition".into(), + ) + })?; #[cfg(test)] - BLOCK_REJECTIONS_CURRENT_TIMEOUT.set(rejections_timeout); + BLOCK_REJECTIONS_CURRENT_TIMEOUT.set(*rejections_timeout.1); } if block_status @@ -404,10 +418,10 @@ impl SignerCoordinator { "block_signer_sighash" => %block_signer_sighash, ); return Ok(block_status.gathered_signatures.values().cloned().collect()); - } else if rejections_timer.elapsed() > rejections_timeout { + } else if rejections_timer.elapsed() > *rejections_timeout.1 { warn!("Timed out while waiting for responses from signers"; "elapsed" => rejections_timer.elapsed().as_secs(), - "rejections_timeout" => 
rejections_timeout.as_secs(), + "rejections_timeout" => rejections_timeout.1.as_secs(), "rejections" => rejections, "rejections_threshold" => self.total_weight.saturating_sub(self.weight_threshold) ); diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index ddde8bd5da..ba10f9e792 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -7826,7 +7826,11 @@ fn block_validation_check_rejection_timeout_heuristic() { |config| { config.block_proposal_validation_timeout = timeout; }, - |_| {}, + |config| { + config.miner.block_rejection_timeout_steps.clear(); + config.miner.block_rejection_timeout_steps.insert(0, Duration::from_secs(600)); + config.miner.block_rejection_timeout_steps.insert(3, Duration::from_secs(17)); + }, None, None, ); From da16af9498dbc1d865b682fd176e4fcf8c74d8ad Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Tue, 28 Jan 2025 14:50:11 -0500 Subject: [PATCH 35/82] feat: allow other transactions with tenure extends We want the heuristic to be such that the miner mines block found tenure changes quickly, only including the tenure change and the coinbase, but tenure extensions do not require this quick response, so they should include other transactions. 
Fixes #5577 --- .github/workflows/bitcoin-tests.yml | 1 + stackslib/src/chainstate/stacks/miner.rs | 8 +- .../src/tests/nakamoto_integrations.rs | 12 +- testnet/stacks-node/src/tests/signer/v0.rs | 109 +++++++++++++++++- 4 files changed, 120 insertions(+), 10 deletions(-) diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index 363e02044f..21ad473af8 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -124,6 +124,7 @@ jobs: - tests::signer::v0::signing_in_0th_tenure_of_reward_cycle - tests::signer::v0::continue_after_tenure_extend - tests::signer::v0::tenure_extend_after_idle_signers + - tests::signer::v0::tenure_extend_with_other_transactions - tests::signer::v0::tenure_extend_after_idle_miner - tests::signer::v0::tenure_extend_after_failed_miner - tests::signer::v0::tenure_extend_succeeds_after_rejected_attempt diff --git a/stackslib/src/chainstate/stacks/miner.rs b/stackslib/src/chainstate/stacks/miner.rs index f4c7286f58..9e661e4460 100644 --- a/stackslib/src/chainstate/stacks/miner.rs +++ b/stackslib/src/chainstate/stacks/miner.rs @@ -2258,7 +2258,13 @@ impl StacksBlockBuilder { // nakamoto miner tenure start heuristic: // mine an empty block so you can start your tenure quickly! if let Some(tx) = initial_txs.first() { - if matches!(&tx.payload, TransactionPayload::TenureChange(_)) { + if matches!( + &tx.payload, + TransactionPayload::TenureChange(TenureChangePayload { + cause: TenureChangeCause::BlockFound, + .. 
+ }) + ) { info!("Nakamoto miner heuristic: during tenure change blocks, produce a fast short block to begin tenure"); return Ok((false, tx_events)); } diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 30c3cfed3b..375de8a367 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -247,9 +247,15 @@ pub fn check_nakamoto_empty_block_heuristics() { continue; } let txs = test_observer::parse_transactions(block); - let has_tenure_change = txs - .iter() - .any(|tx| matches!(tx.payload, TransactionPayload::TenureChange(_))); + let has_tenure_change = txs.iter().any(|tx| { + matches!( + tx.payload, + TransactionPayload::TenureChange(TenureChangePayload { + cause: TenureChangeCause::BlockFound, + .. + }) + ) + }); if has_tenure_change { let only_coinbase_and_tenure_change = txs.iter().all(|tx| { matches!( diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index f9050644dc..3b963ba500 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -2616,6 +2616,51 @@ fn tenure_extend_after_idle_signers() { return; } + tracing_subscriber::registry() + .with(fmt::layer()) + .with(EnvFilter::from_default_env()) + .init(); + + info!("------------------------- Test Setup -------------------------"); + let num_signers = 5; + let idle_timeout = Duration::from_secs(30); + let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( + num_signers, + vec![], + |config| { + config.tenure_idle_timeout = idle_timeout; + }, + |_| {}, + None, + None, + ); + + signer_test.boot_to_epoch_3(); + + info!("---- Nakamoto booted, starting test ----"); + signer_test.mine_nakamoto_block(Duration::from_secs(30), true); + + info!("---- Waiting for a tenure extend ----"); + + // Now, wait for a block with a tenure extend + 
wait_for(idle_timeout.as_secs() + 10, || { + Ok(last_block_contains_tenure_change_tx( + TenureChangeCause::Extended, + )) + }) + .expect("Timed out waiting for a block with a tenure extend"); + + signer_test.shutdown(); +} + +#[test] +#[ignore] +/// This test verifies that a miner will include other transactions with a TenureExtend transaction. +fn tenure_extend_with_other_transactions() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + tracing_subscriber::registry() .with(fmt::layer()) .with(EnvFilter::from_default_env()) @@ -2627,7 +2672,7 @@ fn tenure_extend_after_idle_signers() { let sender_addr = tests::to_addr(&sender_sk); let send_amt = 100; let send_fee = 180; - let _recipient = PrincipalData::from(StacksAddress::burn_address(false)); + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); let idle_timeout = Duration::from_secs(30); let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( num_signers, @@ -2639,20 +2684,72 @@ fn tenure_extend_after_idle_signers() { None, None, ); - let _http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); + let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); signer_test.boot_to_epoch_3(); info!("---- Nakamoto booted, starting test ----"); signer_test.mine_nakamoto_block(Duration::from_secs(30), true); - info!("---- Waiting for a tenure extend ----"); + info!("Pause miner so it doesn't propose a block before the tenure extend"); + TEST_MINE_STALL.set(true); + + // Submit a transaction to be included with the tenure extend + let transfer_tx = make_stacks_transfer( + &sender_sk, + 0, + send_fee, + signer_test.running_nodes.conf.burnchain.chain_id, + &recipient, + send_amt, + ); + let _tx = submit_tx(&http_origin, &transfer_tx); + + info!("---- Wait for tenure extend timeout ----"); + + sleep_ms(idle_timeout.as_millis() as u64 + 1000); + + info!("---- Resume miner to propose a block with the tenure extend 
----"); + TEST_MINE_STALL.set(false); // Now, wait for a block with a tenure extend wait_for(idle_timeout.as_secs() + 10, || { - Ok(last_block_contains_tenure_change_tx( - TenureChangeCause::Extended, - )) + let blocks = test_observer::get_blocks(); + let last_block = &blocks.last().unwrap(); + let transactions = last_block["transactions"].as_array().unwrap(); + let (first_tx, other_txs) = transactions.split_first().unwrap(); + let raw_tx = first_tx["raw_tx"].as_str().unwrap(); + let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); + let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); + let found_tenure_extend = match &parsed.payload { + TransactionPayload::TenureChange(payload) + if payload.cause == TenureChangeCause::Extended => + { + info!("Found tenure extend transaction: {parsed:?}"); + true + } + _ => false, + }; + if found_tenure_extend { + let found_transfer = other_txs.iter().any(|tx| { + let raw_tx = tx["raw_tx"].as_str().unwrap(); + let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); + let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); + match &parsed.payload { + TransactionPayload::TokenTransfer(..) => true, + _ => false, + } + }); + if found_transfer { + info!("Found transfer transaction"); + Ok(true) + } else { + Err("No transfer transaction found together with the tenure extend".to_string()) + } + } else { + info!("No tenure change transaction found"); + Ok(false) + } }) .expect("Timed out waiting for a block with a tenure extend"); From 2272b8f787ffbc5cc205222c1ded3944066a3f64 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Tue, 28 Jan 2025 15:32:09 -0500 Subject: [PATCH 36/82] docs: add changelog entry --- CHANGELOG.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index d9631ccf65..226f7b5159 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,12 @@ All notable changes to this project will be documented in this file. 
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to the versioning scheme outlined in the [README.md](README.md). +## [Unreleased] + +### Changed + +- Miner will include other transactions in blocks with tenure extend transactions (#5760) + ## [3.1.0.0.4] ### Added From 02d595c2b48ea52122307b7bede1691dc606e515 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Tue, 28 Jan 2025 16:46:06 -0500 Subject: [PATCH 37/82] test: fix `tests::nakamoto_integrations::continue_tenure_extend` --- testnet/stacks-node/src/tests/nakamoto_integrations.rs | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 375de8a367..f487333905 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -7206,7 +7206,9 @@ fn continue_tenure_extend() { let mut tenure_block_founds = vec![]; let mut transfer_tx_included = false; let mut last_block_had_extend = false; - for block in test_observer::get_blocks() { + for pair in test_observer::get_blocks().windows(2) { + let prev_block = &pair[0]; + let block = &pair[1]; let mut has_extend = false; for tx in block["transactions"].as_array().unwrap() { let raw_tx = tx["raw_tx"].as_str().unwrap(); @@ -7227,8 +7229,10 @@ fn continue_tenure_extend() { tenure_extends.push(parsed); } TenureChangeCause::BlockFound => { - if last_block_had_extend { - panic!("Expected a Nakamoto block to happen after tenure extend block"); + if last_block_had_extend + && prev_block["transactions"].as_array().unwrap().len() <= 1 + { + panic!("Expected other transactions to happen after tenure extend"); } tenure_block_founds.push(parsed); } From 1e5b6544e1bfd3563c662920b8566c38f0fed7d9 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Tue, 28 Jan 2025 17:18:02 -0500 Subject: [PATCH 38/82] test: fix 
`tests::signer::v0::continue_after_fast_block_no_sortition` This test was not quite matching its description and the behavior changed a bit with the changes in this PR. This commit updates the test and the description. --- testnet/stacks-node/src/tests/signer/v0.rs | 72 +++++++++++----------- 1 file changed, 35 insertions(+), 37 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 3b963ba500..2bbcccaced 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -6535,19 +6535,22 @@ fn miner_recovers_when_broadcast_block_delay_across_tenures_occurs() { /// Mine 2 empty burn blocks (simulate fast blocks scenario) /// Miner 2 proposes block N+1 with a TenureChangePayload /// Signers accept and the stacks tip advances to N+1 -/// Miner 2 proposes block N+2 with a TokenTransfer +/// Miner 2 proposes block N+2 with a TenureExtend /// Signers accept and the stacks tip advances to N+2 +/// Miner 2 proposes block N+3 with a TokenTransfer +/// Signers accept and the stacks tip advances to N+3 /// Mine an empty burn block -/// Miner 2 proposes block N+3 with a TenureExtend -/// Signers accept and the chain advances to N+3 -/// Miner 1 wins the next tenure and proposes a block N+4 with a TenureChangePayload +/// Miner 2 proposes block N+4 with a TenureExtend /// Signers accept and the chain advances to N+4 +/// Miner 1 wins the next tenure and proposes a block N+5 with a TenureChangePayload +/// Signers accept and the chain advances to N+5 /// Asserts: /// - Block N+1 contains the TenureChangePayload -/// - Block N+2 contains the TokenTransfer -/// - Block N+3 contains the TenureExtend -/// - Block N+4 contains the TenureChangePayload -/// - The stacks tip advances to N+4 +/// - Block N+2 contains the TenureExtend +/// - Block N+3 contains the TokenTransfer +/// - Block N+4 contains the TenureExtend +/// - Block N+5 contains the TenureChangePayload +/// - The 
stacks tip advances to N+5 #[test] #[ignore] fn continue_after_fast_block_no_sortition() { @@ -6908,7 +6911,7 @@ fn continue_after_fast_block_no_sortition() { // Allow signers to respond to proposals again TEST_REJECT_ALL_BLOCK_PROPOSAL.set(Vec::new()); - info!("------------------------- Wait for Miner B's Block N -------------------------"; + info!("------------------------- Wait for Miner B's Block N+1 -------------------------"; "blocks_processed_before_2" => %blocks_processed_before_2, "stacks_height_before" => %stacks_height_before, "nmb_old_blocks" => %nmb_old_blocks); @@ -6923,7 +6926,7 @@ fn continue_after_fast_block_no_sortition() { let blocks_mined1_val = blocks_mined1.load(Ordering::SeqCst); let blocks_mined2_val = blocks_mined2.load(Ordering::SeqCst); - info!("Waiting for Miner B's Block N"; + info!("Waiting for Miner B's Block N+1"; "blocks_mined1_val" => %blocks_mined1_val, "blocks_mined2_val" => %blocks_mined2_val, "stacks_height" => %stacks_height, @@ -6938,11 +6941,11 @@ fn continue_after_fast_block_no_sortition() { .expect("Timed out waiting for block to be mined and processed"); info!( - "------------------------- Verify Tenure Change Tx in Miner B's Block N -------------------------" + "------------------------- Verify Tenure Change Tx in Miner B's Block N+1 -------------------------" ); verify_last_block_contains_tenure_change_tx(TenureChangeCause::BlockFound); - info!("------------------------- Wait for Miner B's Block N+1 -------------------------"); + info!("------------------------- Wait for Miner B's Block N+2 -------------------------"); let nmb_old_blocks = test_observer::get_blocks().len(); let blocks_processed_before_2 = blocks_mined2.load(Ordering::SeqCst); @@ -6952,18 +6955,7 @@ fn continue_after_fast_block_no_sortition() { .expect("Failed to get peer info") .stacks_tip_height; - // submit a tx so that the miner will mine an extra block - let transfer_tx = make_stacks_transfer( - &sender_sk, - sender_nonce, - send_fee, - 
signer_test.running_nodes.conf.burnchain.chain_id, - &recipient, - send_amt, - ); - submit_tx(&http_origin, &transfer_tx); - - // wait for the tenure-extend block to be processed + // wait for the transfer block to be processed wait_for(30, || { let stacks_height = signer_test .stacks_client @@ -6978,8 +6970,12 @@ fn continue_after_fast_block_no_sortition() { }) .expect("Timed out waiting for block to be mined and processed"); + info!("------------------------- Verify Miner B's Block N+2 -------------------------"); + verify_last_block_contains_tenure_change_tx(TenureChangeCause::Extended); + info!("------------------------- Wait for Miner B's Block N+3 -------------------------"); + let nmb_old_blocks = test_observer::get_blocks().len(); let blocks_processed_before_2 = blocks_mined2.load(Ordering::SeqCst); let stacks_height_before = signer_test @@ -6988,22 +6984,24 @@ fn continue_after_fast_block_no_sortition() { .expect("Failed to get peer info") .stacks_tip_height; - // wait for the new block with the STX transfer to be processed + // submit a tx so that the miner will mine an extra block + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + signer_test.running_nodes.conf.burnchain.chain_id, + &recipient, + send_amt, + ); + submit_tx(&http_origin, &transfer_tx); + + // wait for the transfer block to be processed wait_for(30, || { let stacks_height = signer_test .stacks_client .get_peer_info() .expect("Failed to get peer info") .stacks_tip_height; - - let blocks_mined1_val = blocks_mined1.load(Ordering::SeqCst); - let blocks_mined2_val = blocks_mined2.load(Ordering::SeqCst); - info!("Waiting for Miner B's Block N"; - "blocks_mined1_val" => %blocks_mined1_val, - "blocks_mined2_val" => %blocks_mined2_val, - "stacks_height" => %stacks_height, - "observed_blocks" => %test_observer::get_blocks().len()); - Ok( blocks_mined2.load(Ordering::SeqCst) > blocks_processed_before_2 && stacks_height > stacks_height_before @@ -7012,7 +7010,7 @@ fn 
continue_after_fast_block_no_sortition() { }) .expect("Timed out waiting for block to be mined and processed"); - info!("------------------------- Verify Miner B's Block N+1 -------------------------"); + info!("------------------------- Verify Miner B's Block N+3 -------------------------"); verify_last_block_contains_transfer_tx(); @@ -7029,7 +7027,7 @@ fn continue_after_fast_block_no_sortition() { .unwrap(); btc_blocks_mined += 1; - info!("------------------------- Verify Miner B's Issues a Tenure Change Extend in Block N+2 -------------------------"); + info!("------------------------- Verify Miner B's Issues a Tenure Change Extend in Block N+4 -------------------------"); verify_last_block_contains_tenure_change_tx(TenureChangeCause::Extended); info!("------------------------- Unpause Miner A's Block Commits -------------------------"); @@ -7064,7 +7062,7 @@ fn continue_after_fast_block_no_sortition() { assert!(tip.sortition); assert_eq!(tip.miner_pk_hash.unwrap(), mining_pkh_1); - info!("------------------------- Verify Miner A's Issued a Tenure Change in Block N+4 -------------------------"); + info!("------------------------- Verify Miner A's Issued a Tenure Change in Block N+5 -------------------------"); verify_last_block_contains_tenure_change_tx(TenureChangeCause::BlockFound); info!( From 3071d3601f951bd9f4e0ea14944e38fd1e840ca3 Mon Sep 17 00:00:00 2001 From: Roberto De Ioris Date: Wed, 29 Jan 2025 15:37:42 +0000 Subject: [PATCH 39/82] improved config system --- .github/workflows/bitcoin-tests.yml | 1 + stackslib/src/config/mod.rs | 25 +++-- .../src/nakamoto_node/signer_coordinator.rs | 48 +++++--- testnet/stacks-node/src/tests/signer/v0.rs | 105 +++++++++++++++--- 4 files changed, 133 insertions(+), 46 deletions(-) diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index bb64a1a8b7..41fb60d2e8 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -133,6 +133,7 @@ jobs: - 
tests::signer::v0::block_commit_delay - tests::signer::v0::continue_after_fast_block_no_sortition - tests::signer::v0::block_validation_response_timeout + - tests::signer::v0::block_validation_check_rejection_timeout_heuristic - tests::signer::v0::block_validation_pending_table - tests::signer::v0::new_tenure_while_validating_previous_scenario - tests::signer::v0::tenure_extend_after_bad_commit diff --git a/stackslib/src/config/mod.rs b/stackslib/src/config/mod.rs index b26676294f..2982273825 100644 --- a/stackslib/src/config/mod.rs +++ b/stackslib/src/config/mod.rs @@ -2158,8 +2158,8 @@ pub struct MinerConfig { pub tenure_extend_poll_secs: Duration, /// Duration to wait before attempting to issue a tenure extend pub tenure_timeout: Duration, - /// - pub block_rejection_timeout_steps: BTreeMap, + /// Define the timeout to apply while waiting for signers responses, based on the amount of rejections + pub block_rejection_timeout_steps: HashMap, } impl Default for MinerConfig { @@ -2199,12 +2199,13 @@ impl Default for MinerConfig { tenure_extend_poll_secs: Duration::from_secs(DEFAULT_TENURE_EXTEND_POLL_SECS), tenure_timeout: Duration::from_secs(DEFAULT_TENURE_TIMEOUT_SECS), block_rejection_timeout_steps: { - let mut timeouts_btree = BTreeMap::::new(); - timeouts_btree.insert(0, Duration::from_secs(600)); - timeouts_btree.insert(1, Duration::from_secs(300)); - timeouts_btree.insert(2, Duration::from_secs(150)); - timeouts_btree.insert(3, Duration::from_secs(0)); - timeouts_btree + let mut rejections_timeouts_default_map = HashMap::::new(); + rejections_timeouts_default_map.insert(0, Duration::from_secs(600)); + rejections_timeouts_default_map.insert(10, Duration::from_secs(300)); + rejections_timeouts_default_map.insert(20, Duration::from_secs(150)); + rejections_timeouts_default_map.insert(30, Duration::from_secs(60)); + rejections_timeouts_default_map.insert(31, Duration::from_secs(0)); + rejections_timeouts_default_map }, } } @@ -2747,14 +2748,14 @@ impl 
MinerConfigFile { tenure_timeout: self.tenure_timeout_secs.map(Duration::from_secs).unwrap_or(miner_default_config.tenure_timeout), block_rejection_timeout_steps: { if let Some(block_rejection_timeout_items) = self.block_rejection_timeout_steps { - let mut timeouts_btree = BTreeMap::::new(); - for (slice, millis) in block_rejection_timeout_items.iter() { + let mut rejection_timeout_durations = HashMap::::new(); + for (slice, seconds) in block_rejection_timeout_items.iter() { match slice.parse::() { - Ok(slice_slot) => timeouts_btree.insert(slice_slot, Duration::from_millis(*millis)), + Ok(slice_slot) => rejection_timeout_durations.insert(slice_slot, Duration::from_secs(*seconds)), Err(e) => panic!("block_rejection_timeout_items keys must be unsigned integers: {}", e) }; } - timeouts_btree + rejection_timeout_durations } else{ miner_default_config.block_rejection_timeout_steps } diff --git a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs index 686d62a2d2..d04b306bfd 100644 --- a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs @@ -38,7 +38,7 @@ use stacks::util::secp256k1::MessageSignature; use stacks::util_lib::boot::boot_code_id; #[cfg(test)] use stacks_common::util::tests::TestFlag; -use std::collections::BTreeMap; +use std::collections::{BTreeMap, HashMap}; use std::ops::Bound::Included; use super::stackerdb_listener::StackerDBListenerComms; @@ -81,7 +81,7 @@ pub struct SignerCoordinator { /// burn block has arrived since this thread started. 
burn_tip_at_start: ConsensusHash, /// - block_rejection_timeout_steps: BTreeMap, + block_rejection_timeout_steps: HashMap, } impl SignerCoordinator { @@ -310,15 +310,20 @@ impl SignerCoordinator { sortdb: &SortitionDB, counters: &Counters, ) -> Result, NakamotoNodeError> { - // this is used to track the start of the waiting cycle - let mut rejections_timer = Instant::now(); - // the amount of current rejections to eventually modify the timeout + // build a BTreeMap of the various timeout steps + let mut block_rejection_timeout_steps = BTreeMap::::new(); + for (percentage, duration) in self.block_rejection_timeout_steps.iter() { + let rejections_amount = + ((self.total_weight as f64 / 100.0) * *percentage as f64) as u64; + block_rejection_timeout_steps.insert(rejections_amount, *duration); + } + + // the amount of current rejections (used to eventually modify the timeout) let mut rejections: u64 = 0; - // default timeout + // default timeout (the 0 entry must be always present) let mut rejections_timeout = self .block_rejection_timeout_steps - .range((Included(0), Included(rejections))) - .last() + .get(&rejections) .ok_or_else(|| { NakamotoNodeError::SigningCoordinatorFailure( "Invalid rejection timeout step function definition".into(), @@ -326,6 +331,9 @@ impl SignerCoordinator { })?; // this is used for comparing block_status to identify if it has been changed from the previous event let mut block_status_tracker = BlockStatus::default(); + + // this is used to track the start of the waiting cycle + let mut rejections_timer = Instant::now(); loop { // At every iteration wait for the block_status. // Exit when the amount of confirmations/rejections reaches the threshold (or until timeout) @@ -334,7 +342,7 @@ impl SignerCoordinator { block_signer_sighash, &mut block_status_tracker, rejections_timer, - *rejections_timeout.1, + *rejections_timeout, EVENT_RECEIVER_POLL, )? 
{ Some(status) => status, @@ -368,10 +376,10 @@ impl SignerCoordinator { return Err(NakamotoNodeError::BurnchainTipChanged); } - if rejections_timer.elapsed() > *rejections_timeout.1 { + if rejections_timer.elapsed() > *rejections_timeout { warn!("Timed out while waiting for responses from signers"; "elapsed" => rejections_timer.elapsed().as_secs(), - "rejections_timeout" => rejections_timeout.1.as_secs(), + "rejections_timeout" => rejections_timeout.as_secs(), "rejections" => rejections, "rejections_threshold" => self.total_weight.saturating_sub(self.weight_threshold) ); @@ -385,20 +393,24 @@ impl SignerCoordinator { }; if rejections != block_status.total_reject_weight as u64 { - rejections_timer = Instant::now(); rejections = block_status.total_reject_weight as u64; - rejections_timeout = self - .block_rejection_timeout_steps - .range((Included(0), Included((rejections as f64 / self.total_weight as f64) as u64))) + let rejections_timeout_tuple = block_rejection_timeout_steps + .range((Included(0), Included(rejections))) .last() .ok_or_else(|| { NakamotoNodeError::SigningCoordinatorFailure( "Invalid rejection timeout step function definition".into(), ) })?; + rejections_timeout = rejections_timeout_tuple.1; + info!("Number of received rejections updated, resetting timeout"; + "rejections" => rejections, + "rejections_timeout" => rejections_timeout.as_secs(), + "rejections_threshold" => self.total_weight.saturating_sub(self.weight_threshold)); + rejections_timer = Instant::now(); #[cfg(test)] - BLOCK_REJECTIONS_CURRENT_TIMEOUT.set(*rejections_timeout.1); + BLOCK_REJECTIONS_CURRENT_TIMEOUT.set(*rejections_timeout); } if block_status @@ -418,10 +430,10 @@ impl SignerCoordinator { "block_signer_sighash" => %block_signer_sighash, ); return Ok(block_status.gathered_signatures.values().cloned().collect()); - } else if rejections_timer.elapsed() > *rejections_timeout.1 { + } else if rejections_timer.elapsed() > *rejections_timeout { warn!("Timed out while waiting for 
responses from signers"; "elapsed" => rejections_timer.elapsed().as_secs(), - "rejections_timeout" => rejections_timeout.1.as_secs(), + "rejections_timeout" => rejections_timeout.as_secs(), "rejections" => rejections, "rejections_threshold" => self.total_weight.saturating_sub(self.weight_threshold) ); diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index ba10f9e792..b603a887a6 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -7813,7 +7813,7 @@ fn block_validation_check_rejection_timeout_heuristic() { } info!("------------------------- Test Setup -------------------------"); - let num_signers = 5; + let num_signers = 20; let timeout = Duration::from_secs(30); let sender_sk = Secp256k1PrivateKey::new(); let sender_addr = tests::to_addr(&sender_sk); @@ -7828,8 +7828,22 @@ fn block_validation_check_rejection_timeout_heuristic() { }, |config| { config.miner.block_rejection_timeout_steps.clear(); - config.miner.block_rejection_timeout_steps.insert(0, Duration::from_secs(600)); - config.miner.block_rejection_timeout_steps.insert(3, Duration::from_secs(17)); + config + .miner + .block_rejection_timeout_steps + .insert(0, Duration::from_secs(123)); + config + .miner + .block_rejection_timeout_steps + .insert(10, Duration::from_secs(20)); + config + .miner + .block_rejection_timeout_steps + .insert(15, Duration::from_secs(10)); + config + .miner + .block_rejection_timeout_steps + .insert(20, Duration::from_secs(99)); }, None, None, @@ -7849,13 +7863,8 @@ fn block_validation_check_rejection_timeout_heuristic() { let blocks_before = test_observer::get_mined_nakamoto_blocks().len(); - TEST_REJECT_ALL_BLOCK_PROPOSAL.set(vec![all_signers[4]]); - TEST_IGNORE_ALL_BLOCK_PROPOSALS.set(vec![ - all_signers[0], - all_signers[1], - all_signers[2], - all_signers[3], - ]); + TEST_REJECT_ALL_BLOCK_PROPOSAL.set(vec![all_signers[19]]); + 
TEST_IGNORE_ALL_BLOCK_PROPOSALS.set(all_signers[0..19].to_vec()); next_block_and( &mut signer_test.running_nodes.btc_regtest_controller, @@ -7865,17 +7874,19 @@ fn block_validation_check_rejection_timeout_heuristic() { .unwrap(); signer_test - .wait_for_block_rejections(timeout.as_secs(), &[all_signers[4]]) + .wait_for_block_rejections(timeout.as_secs(), &[all_signers[19]]) .unwrap(); - assert_eq!(BLOCK_REJECTIONS_CURRENT_TIMEOUT.get().as_secs(), 551); + thread::sleep(Duration::from_secs(3)); + + assert_eq!(BLOCK_REJECTIONS_CURRENT_TIMEOUT.get().as_secs(), 123); info!("------------------------- Check Rejections-based timeout with 2 rejections -------------------------"); let blocks_before = test_observer::get_mined_nakamoto_blocks().len(); - TEST_REJECT_ALL_BLOCK_PROPOSAL.set(vec![all_signers[3], all_signers[4]]); - TEST_IGNORE_ALL_BLOCK_PROPOSALS.set(vec![all_signers[0], all_signers[1], all_signers[2]]); + TEST_REJECT_ALL_BLOCK_PROPOSAL.set(vec![all_signers[18], all_signers[19]]); + TEST_IGNORE_ALL_BLOCK_PROPOSALS.set(all_signers[0..18].to_vec()); next_block_and( &mut signer_test.running_nodes.btc_regtest_controller, @@ -7885,10 +7896,72 @@ fn block_validation_check_rejection_timeout_heuristic() { .unwrap(); signer_test - .wait_for_block_rejections(timeout.as_secs(), &[all_signers[3], all_signers[4]]) + .wait_for_block_rejections(timeout.as_secs(), &[all_signers[18], all_signers[19]]) .unwrap(); - assert_eq!(BLOCK_REJECTIONS_CURRENT_TIMEOUT.get().as_secs(), 404); + thread::sleep(Duration::from_secs(3)); + + assert_eq!(BLOCK_REJECTIONS_CURRENT_TIMEOUT.get().as_secs(), 20); + + info!("------------------------- Check Rejections-based timeout with 3 rejections -------------------------"); + + let blocks_before = test_observer::get_mined_nakamoto_blocks().len(); + + TEST_REJECT_ALL_BLOCK_PROPOSAL.set(vec![all_signers[17], all_signers[18], all_signers[19]]); + TEST_IGNORE_ALL_BLOCK_PROPOSALS.set(all_signers[0..17].to_vec()); + + next_block_and( + &mut 
signer_test.running_nodes.btc_regtest_controller, + 30, + || Ok(test_observer::get_mined_nakamoto_blocks().len() > blocks_before), + ) + .unwrap(); + + signer_test + .wait_for_block_rejections( + timeout.as_secs(), + &[all_signers[17], all_signers[18], all_signers[19]], + ) + .unwrap(); + + thread::sleep(Duration::from_secs(3)); + + assert_eq!(BLOCK_REJECTIONS_CURRENT_TIMEOUT.get().as_secs(), 10); + + info!("------------------------- Check Rejections-based timeout with 4 rejections -------------------------"); + + let blocks_before = test_observer::get_mined_nakamoto_blocks().len(); + + TEST_REJECT_ALL_BLOCK_PROPOSAL.set(vec![ + all_signers[16], + all_signers[17], + all_signers[18], + all_signers[19], + ]); + TEST_IGNORE_ALL_BLOCK_PROPOSALS.set(all_signers[0..16].to_vec()); + + next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + 30, + || Ok(test_observer::get_mined_nakamoto_blocks().len() > blocks_before), + ) + .unwrap(); + + signer_test + .wait_for_block_rejections( + timeout.as_secs(), + &[ + all_signers[16], + all_signers[17], + all_signers[18], + all_signers[19], + ], + ) + .unwrap(); + + thread::sleep(Duration::from_secs(3)); + + assert_eq!(BLOCK_REJECTIONS_CURRENT_TIMEOUT.get().as_secs(), 99); // reset reject/ignore TEST_REJECT_ALL_BLOCK_PROPOSAL.set(vec![]); From 9b0f23369b49366a4169043165553cd681aa4fa6 Mon Sep 17 00:00:00 2001 From: Hank Stoever Date: Wed, 29 Jan 2025 07:38:12 -0800 Subject: [PATCH 40/82] fix: always send pending block validation after receiving a block validation result --- stacks-signer/src/v0/signer.rs | 47 +++++++++++++++++----------------- 1 file changed, 24 insertions(+), 23 deletions(-) diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index 70253f8258..abe05beb40 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -818,31 +818,32 @@ impl Signer { .remove_pending_block_validation(&signer_sig_hash) .unwrap_or_else(|e| warn!("{self}: Failed to remove 
pending block validation: {e:?}")); - let Some(response) = block_response else { - return; - }; - // Submit a proposal response to the .signers contract for miners - info!( - "{self}: Broadcasting a block response to stacks node: {response:?}"; - ); - let accepted = matches!(response, BlockResponse::Accepted(..)); - match self - .stackerdb - .send_message_with_retry::(response.into()) - { - Ok(_) => { - crate::monitoring::actions::increment_block_responses_sent(accepted); - if let Ok(Some(block_info)) = self - .signer_db - .block_lookup(&block_validate_response.signer_signature_hash()) - { - crate::monitoring::actions::record_block_response_latency(&block_info.block); + if let Some(response) = block_response { + // Submit a proposal response to the .signers contract for miners + info!( + "{self}: Broadcasting a block response to stacks node: {response:?}"; + ); + let accepted = matches!(response, BlockResponse::Accepted(..)); + match self + .stackerdb + .send_message_with_retry::(response.into()) + { + Ok(_) => { + crate::monitoring::actions::increment_block_responses_sent(accepted); + if let Ok(Some(block_info)) = self + .signer_db + .block_lookup(&block_validate_response.signer_signature_hash()) + { + crate::monitoring::actions::record_block_response_latency( + &block_info.block, + ); + } + } + Err(e) => { + warn!("{self}: Failed to send block rejection to stacker-db: {e:?}",); } } - Err(e) => { - warn!("{self}: Failed to send block rejection to stacker-db: {e:?}",); - } - } + }; // Check if there is a pending block validation that we need to submit to the node match self.signer_db.get_and_remove_pending_block_validation() { From f2ab500150f6fd87a760cb6399a1076f495eeb2b Mon Sep 17 00:00:00 2001 From: Roberto De Ioris Date: Wed, 29 Jan 2025 15:51:52 +0000 Subject: [PATCH 41/82] fmt-stacks --- stackslib/src/config/mod.rs | 4 +--- testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs | 4 ++-- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git 
a/stackslib/src/config/mod.rs b/stackslib/src/config/mod.rs index 2982273825..0e30de5998 100644 --- a/stackslib/src/config/mod.rs +++ b/stackslib/src/config/mod.rs @@ -16,7 +16,7 @@ pub mod chain_data; -use std::collections::{HashMap, HashSet}; +use std::collections::{BTreeMap, HashMap, HashSet}; use std::net::{Ipv4Addr, SocketAddr, ToSocketAddrs}; use std::path::PathBuf; use std::str::FromStr; @@ -36,8 +36,6 @@ use stacks_common::util::get_epoch_time_ms; use stacks_common::util::hash::hex_bytes; use stacks_common::util::secp256k1::{Secp256k1PrivateKey, Secp256k1PublicKey}; -use std::collections::BTreeMap; - use crate::burnchains::affirmation::AffirmationMap; use crate::burnchains::bitcoin::BitcoinNetworkType; use crate::burnchains::{Burnchain, MagicBytes, PoxConstants, BLOCKSTACK_MAGIC_MAINNET}; diff --git a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs index d04b306bfd..1e994b89af 100644 --- a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs @@ -13,6 +13,8 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
+use std::collections::{BTreeMap, HashMap}; +use std::ops::Bound::Included; use std::sync::atomic::AtomicBool; #[cfg(test)] use std::sync::LazyLock; @@ -38,8 +40,6 @@ use stacks::util::secp256k1::MessageSignature; use stacks::util_lib::boot::boot_code_id; #[cfg(test)] use stacks_common::util::tests::TestFlag; -use std::collections::{BTreeMap, HashMap}; -use std::ops::Bound::Included; use super::stackerdb_listener::StackerDBListenerComms; use super::Error as NakamotoNodeError; From 5c0805a73a7e55a187f9911745b7a335ce0d13ef Mon Sep 17 00:00:00 2001 From: Roberto De Ioris Date: Wed, 29 Jan 2025 15:58:01 +0000 Subject: [PATCH 42/82] fixed default rejections timeout --- stackslib/src/config/mod.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/stackslib/src/config/mod.rs b/stackslib/src/config/mod.rs index 0e30de5998..dc79732486 100644 --- a/stackslib/src/config/mod.rs +++ b/stackslib/src/config/mod.rs @@ -2201,8 +2201,7 @@ impl Default for MinerConfig { rejections_timeouts_default_map.insert(0, Duration::from_secs(600)); rejections_timeouts_default_map.insert(10, Duration::from_secs(300)); rejections_timeouts_default_map.insert(20, Duration::from_secs(150)); - rejections_timeouts_default_map.insert(30, Duration::from_secs(60)); - rejections_timeouts_default_map.insert(31, Duration::from_secs(0)); + rejections_timeouts_default_map.insert(30, Duration::from_secs(0)); rejections_timeouts_default_map }, } From f22a9907ba62a76c5a807b677da6a396f1ed4eed Mon Sep 17 00:00:00 2001 From: Roberto De Ioris Date: Wed, 29 Jan 2025 15:59:32 +0000 Subject: [PATCH 43/82] improved comment for rejections timeout --- testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs index 1e994b89af..a62efb9570 100644 --- a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs +++ 
b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs @@ -80,7 +80,7 @@ pub struct SignerCoordinator { /// Rather, this burn block is used to determine whether or not a new /// burn block has arrived since this thread started. burn_tip_at_start: ConsensusHash, - /// + /// The tiemout configuration based on the percentage of rejections block_rejection_timeout_steps: HashMap, } From 4f4b58481dc43ba540e87b4d66376323cb583531 Mon Sep 17 00:00:00 2001 From: Roberto De Ioris Date: Wed, 29 Jan 2025 16:05:18 +0000 Subject: [PATCH 44/82] fixed typo --- testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs index a62efb9570..32c428534f 100644 --- a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs @@ -80,7 +80,7 @@ pub struct SignerCoordinator { /// Rather, this burn block is used to determine whether or not a new /// burn block has arrived since this thread started. 
burn_tip_at_start: ConsensusHash, - /// The tiemout configuration based on the percentage of rejections + /// The timeout configuration based on the percentage of rejections block_rejection_timeout_steps: HashMap, } From 463839f3f8a0ab12ffed52673522ec0f288801bd Mon Sep 17 00:00:00 2001 From: Roberto De Ioris Date: Wed, 29 Jan 2025 16:08:24 +0000 Subject: [PATCH 45/82] ensure 0 key is specified for rejections timeout --- stackslib/src/config/mod.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/stackslib/src/config/mod.rs b/stackslib/src/config/mod.rs index dc79732486..7897730d70 100644 --- a/stackslib/src/config/mod.rs +++ b/stackslib/src/config/mod.rs @@ -2749,9 +2749,12 @@ impl MinerConfigFile { for (slice, seconds) in block_rejection_timeout_items.iter() { match slice.parse::() { Ok(slice_slot) => rejection_timeout_durations.insert(slice_slot, Duration::from_secs(*seconds)), - Err(e) => panic!("block_rejection_timeout_items keys must be unsigned integers: {}", e) + Err(e) => panic!("block_rejection_timeout_steps keys must be unsigned integers: {}", e) }; } + if !rejection_timeout_durations.contains_key(&0) { + panic!("block_rejection_timeout_steps requires a definition for the '0' key/step"); + } rejection_timeout_durations } else{ miner_default_config.block_rejection_timeout_steps From 0e7461c8c24455028841c4d19198117b97fa707d Mon Sep 17 00:00:00 2001 From: Roberto De Ioris Date: Wed, 29 Jan 2025 16:23:48 +0000 Subject: [PATCH 46/82] message typo --- testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs index 32c428534f..096f9eb74e 100644 --- a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs @@ -384,7 +384,7 @@ impl SignerCoordinator { "rejections_threshold" => 
self.total_weight.saturating_sub(self.weight_threshold) ); return Err(NakamotoNodeError::SigningCoordinatorFailure( - "Gave up while tried reaching the threshold".into(), + "Gave up while trying to reach the threshold".into(), )); } @@ -438,7 +438,7 @@ impl SignerCoordinator { "rejections_threshold" => self.total_weight.saturating_sub(self.weight_threshold) ); return Err(NakamotoNodeError::SigningCoordinatorFailure( - "Gave up while tried reaching the threshold".into(), + "Gave up while trying to reach the threshold".into(), )); } else { continue; From 3b1a3e8aa296d3f8a586fd556b1e265a3aa85f77 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Wed, 29 Jan 2025 15:12:19 -0600 Subject: [PATCH 47/82] test: fix some test flake in partial_tenure_forking --- testnet/stacks-node/src/globals.rs | 2 +- .../stacks-node/src/nakamoto_node/relayer.rs | 2 +- testnet/stacks-node/src/run_loop/neon.rs | 13 +++++++-- testnet/stacks-node/src/tests/signer/mod.rs | 3 ++ testnet/stacks-node/src/tests/signer/v0.rs | 28 +++++++++++-------- 5 files changed, 32 insertions(+), 16 deletions(-) diff --git a/testnet/stacks-node/src/globals.rs b/testnet/stacks-node/src/globals.rs index 2a9a601723..ca96a1f81c 100644 --- a/testnet/stacks-node/src/globals.rs +++ b/testnet/stacks-node/src/globals.rs @@ -53,7 +53,7 @@ pub struct Globals { unconfirmed_txs: Arc>, /// Writer endpoint to the relayer thread pub relay_send: SyncSender, - /// Cointer state in the main thread + /// Counter state in the main thread pub counters: Counters, /// Connection to the PoX sync watchdog pub sync_comms: PoxSyncWatchdogComms, diff --git a/testnet/stacks-node/src/nakamoto_node/relayer.rs b/testnet/stacks-node/src/nakamoto_node/relayer.rs index e8b848e748..7bd71afbb9 100644 --- a/testnet/stacks-node/src/nakamoto_node/relayer.rs +++ b/testnet/stacks-node/src/nakamoto_node/relayer.rs @@ -1628,7 +1628,7 @@ impl RelayerThread { self.last_commits.insert(txid); self.globals .counters -
.bump_naka_submitted_commits(last_committed.burn_tip.block_height); + .bump_naka_submitted_commits(last_committed.burn_tip.block_height, tip_height); self.last_committed = Some(last_committed); Ok(()) diff --git a/testnet/stacks-node/src/run_loop/neon.rs b/testnet/stacks-node/src/run_loop/neon.rs index 277b9612fa..0c64444017 100644 --- a/testnet/stacks-node/src/run_loop/neon.rs +++ b/testnet/stacks-node/src/run_loop/neon.rs @@ -116,6 +116,7 @@ pub struct Counters { pub naka_mined_tenures: RunLoopCounter, pub naka_signer_pushed_blocks: RunLoopCounter, pub naka_miner_directives: RunLoopCounter, + pub naka_submitted_commit_last_stacks_tip: RunLoopCounter, #[cfg(test)] pub naka_skip_commit_op: TestFlag, @@ -170,11 +171,19 @@ impl Counters { Counters::inc(&self.naka_submitted_vrfs); } - pub fn bump_naka_submitted_commits(&self, committed_height: u64) { + pub fn bump_naka_submitted_commits( + &self, + committed_burn_height: u64, + committed_stacks_height: u64, + ) { Counters::inc(&self.naka_submitted_commits); Counters::set( &self.naka_submitted_commit_last_burn_height, - committed_height, + committed_burn_height, + ); + Counters::set( + &self.naka_submitted_commit_last_stacks_tip, + committed_stacks_height, ); } diff --git a/testnet/stacks-node/src/tests/signer/mod.rs b/testnet/stacks-node/src/tests/signer/mod.rs index 6ef2431a3a..fba2194edb 100644 --- a/testnet/stacks-node/src/tests/signer/mod.rs +++ b/testnet/stacks-node/src/tests/signer/mod.rs @@ -97,6 +97,7 @@ pub struct RunningNodes { pub nakamoto_blocks_signer_pushed: RunLoopCounter, pub nakamoto_miner_directives: Arc, pub nakamoto_test_skip_commit_op: TestFlag, + pub counters: Counters, pub coord_channel: Arc>, pub conf: NeonConfig, } @@ -947,6 +948,7 @@ fn setup_stx_btc_node( naka_signer_pushed_blocks, .. 
} = run_loop.counters(); + let counters = run_loop.counters(); let coord_channel = run_loop.coordinator_channels(); let run_loop_thread = thread::spawn(move || run_loop.start(None, 0)); @@ -984,6 +986,7 @@ fn setup_stx_btc_node( nakamoto_test_skip_commit_op, nakamoto_miner_directives: naka_miner_directives.0, coord_channel, + counters, conf: naka_conf, } } diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 2bbcccaced..6439ff8e6e 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -5031,6 +5031,8 @@ fn partial_tenure_fork() { naka_skip_commit_op: rl2_skip_commit_op, .. } = run_loop_2.counters(); + let rl2_counters = run_loop_2.counters(); + let rl1_counters = signer_test.running_nodes.counters.clone(); signer_test.boot_to_epoch_3(); let run_loop_2_thread = thread::Builder::new() @@ -5101,35 +5103,37 @@ fn partial_tenure_fork() { rl1_skip_commit_op.set(true); rl2_skip_commit_op.set(true); - let mined_before_1 = blocks_mined1.load(Ordering::SeqCst); - let mined_before_2 = blocks_mined2.load(Ordering::SeqCst); - let commits_before_1 = commits_1.load(Ordering::SeqCst); - let commits_before_2 = commits_2.load(Ordering::SeqCst); + let info_before = get_chain_info(&conf); // Mine the first block next_block_and( &mut signer_test.running_nodes.btc_regtest_controller, 180, || { - let mined_1 = blocks_mined1.load(Ordering::SeqCst); - let mined_2 = blocks_mined2.load(Ordering::SeqCst); - - Ok(mined_1 > mined_before_1 || mined_2 > mined_before_2) + let info_1 = get_chain_info(&conf); + Ok(info_1.stacks_tip_height > info_before.stacks_tip_height) }, ) .expect("Timed out waiting for new Stacks block to be mined"); info!("-------- Mined first block, wait for block commits --------"); + let info_before = get_chain_info(&conf); + // Unpause block commits and wait for both miners' commits rl1_skip_commit_op.set(false); rl2_skip_commit_op.set(false); - // Ensure that both 
block commits have been sent before continuing + // Ensure that both miners' commits point at the stacks tip wait_for(60, || { - let commits_after_1 = commits_1.load(Ordering::SeqCst); - let commits_after_2 = commits_2.load(Ordering::SeqCst); - Ok(commits_after_1 > commits_before_1 && commits_after_2 > commits_before_2) + let last_committed_1 = rl1_counters + .naka_submitted_commit_last_stacks_tip + .load(Ordering::SeqCst); + let last_committed_2 = rl2_counters + .naka_submitted_commit_last_stacks_tip + .load(Ordering::SeqCst); + Ok(last_committed_1 >= info_before.stacks_tip_height + && last_committed_2 >= info_before.stacks_tip_height) }) .expect("Timed out waiting for block commits"); From a1e5deedb394cb12848f276141808440a8cb4fa1 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Wed, 29 Jan 2025 16:02:22 -0600 Subject: [PATCH 48/82] test: fix merge artifact --- testnet/stacks-node/src/tests/nakamoto_integrations.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 215278aec2..71775742d0 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -1761,7 +1761,7 @@ fn restarting_miner() { naka_conf.miner.activated_vrf_key_path = Some(format!("{}/vrf_key", naka_conf.node.working_dir)); naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(5); - let sender_sk = Secp256k1PrivateKey::new(); + let sender_sk = Secp256k1PrivateKey::from_seed(&[1, 2, 1, 2, 1, 2]); // setup sender + recipient for a test stx transfer let sender_addr = tests::to_addr(&sender_sk); let send_amt = 1000; @@ -1770,7 +1770,7 @@ fn restarting_miner() { PrincipalData::from(sender_addr).to_string(), send_amt * 2 + send_fee, ); - let sender_signer_sk = Secp256k1PrivateKey::new(); + let sender_signer_sk = Secp256k1PrivateKey::from_seed(&[3, 2, 3, 2, 3, 2]); let sender_signer_addr = 
tests::to_addr(&sender_signer_sk); let mut signers = TestSigners::new(vec![sender_signer_sk]); naka_conf.add_initial_balance(PrincipalData::from(sender_signer_addr).to_string(), 100000); From 420f861f7e6acc8cd9bd2d07899254198ec16a14 Mon Sep 17 00:00:00 2001 From: Roberto De Ioris Date: Thu, 30 Jan 2025 10:22:19 +0000 Subject: [PATCH 49/82] use Counters instead of global test vars --- .../src/nakamoto_node/signer_coordinator.rs | 17 ++-- testnet/stacks-node/src/run_loop/neon.rs | 7 +- testnet/stacks-node/src/tests/signer/mod.rs | 5 ++ testnet/stacks-node/src/tests/signer/v0.rs | 80 ++++++++++++++++--- 4 files changed, 86 insertions(+), 23 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs index 096f9eb74e..4132bb4284 100644 --- a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs @@ -16,8 +16,6 @@ use std::collections::{BTreeMap, HashMap}; use std::ops::Bound::Included; use std::sync::atomic::AtomicBool; -#[cfg(test)] -use std::sync::LazyLock; use std::sync::{Arc, Mutex}; use std::thread::JoinHandle; use std::time::{Duration, Instant}; @@ -38,8 +36,6 @@ use stacks::types::chainstate::{StacksBlockId, StacksPrivateKey}; use stacks::util::hash::Sha512Trunc256Sum; use stacks::util::secp256k1::MessageSignature; use stacks::util_lib::boot::boot_code_id; -#[cfg(test)] -use stacks_common::util::tests::TestFlag; use super::stackerdb_listener::StackerDBListenerComms; use super::Error as NakamotoNodeError; @@ -50,11 +46,6 @@ use crate::nakamoto_node::stackerdb_listener::{ use crate::neon::Counters; use crate::Config; -#[cfg(test)] -/// Test-only value for storing the current rejection based timeout -pub static BLOCK_REJECTIONS_CURRENT_TIMEOUT: LazyLock> = - LazyLock::new(TestFlag::default); - /// The state of the signer database listener, used by the miner thread to /// interact with the signer listener. 
pub struct SignerCoordinator { @@ -410,7 +401,13 @@ impl SignerCoordinator { rejections_timer = Instant::now(); #[cfg(test)] - BLOCK_REJECTIONS_CURRENT_TIMEOUT.set(*rejections_timeout); + { + Counters::set( + &counters.naka_miner_current_rejections_timeout_secs, + rejections_timeout.as_secs(), + ); + Counters::set(&counters.naka_miner_current_rejections, rejections); + } } if block_status diff --git a/testnet/stacks-node/src/run_loop/neon.rs b/testnet/stacks-node/src/run_loop/neon.rs index deead51066..174b4d40a6 100644 --- a/testnet/stacks-node/src/run_loop/neon.rs +++ b/testnet/stacks-node/src/run_loop/neon.rs @@ -115,6 +115,11 @@ pub struct Counters { pub naka_signer_pushed_blocks: RunLoopCounter, pub naka_miner_directives: RunLoopCounter, + #[cfg(test)] + pub naka_miner_current_rejections: RunLoopCounter, + #[cfg(test)] + pub naka_miner_current_rejections_timeout_secs: RunLoopCounter, + #[cfg(test)] pub naka_skip_commit_op: TestFlag, } @@ -133,7 +138,7 @@ impl Counters { fn inc(_ctr: &RunLoopCounter) {} #[cfg(test)] - fn set(ctr: &RunLoopCounter, value: u64) { + pub fn set(ctr: &RunLoopCounter, value: u64) { ctr.0.store(value, Ordering::SeqCst); } diff --git a/testnet/stacks-node/src/tests/signer/mod.rs b/testnet/stacks-node/src/tests/signer/mod.rs index 47958e8690..67ad402a86 100644 --- a/testnet/stacks-node/src/tests/signer/mod.rs +++ b/testnet/stacks-node/src/tests/signer/mod.rs @@ -98,6 +98,7 @@ pub struct RunningNodes { pub nakamoto_test_skip_commit_op: TestFlag, pub coord_channel: Arc>, pub conf: NeonConfig, + pub counters: Counters, } /// A test harness for running a v0 or v1 signer integration test @@ -943,6 +944,9 @@ fn setup_stx_btc_node( } = run_loop.counters(); let coord_channel = run_loop.coordinator_channels(); + + let run_loop_counters = run_loop.counters(); + let run_loop_thread = thread::spawn(move || run_loop.start(None, 0)); // Give the run loop some time to start up! 
@@ -978,5 +982,6 @@ fn setup_stx_btc_node( nakamoto_miner_directives: naka_miner_directives.0, coord_channel, conf: naka_conf, + counters: run_loop_counters, } } diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index b603a887a6..ec818d34a0 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -76,7 +76,6 @@ use crate::event_dispatcher::{MinedNakamotoBlockEvent, TEST_SKIP_BLOCK_ANNOUNCEM use crate::nakamoto_node::miner::{ TEST_BLOCK_ANNOUNCE_STALL, TEST_BROADCAST_STALL, TEST_MINE_STALL, }; -use crate::nakamoto_node::signer_coordinator::BLOCK_REJECTIONS_CURRENT_TIMEOUT; use crate::nakamoto_node::stackerdb_listener::TEST_IGNORE_SIGNERS; use crate::neon::Counters; use crate::run_loop::boot_nakamoto; @@ -7877,9 +7876,23 @@ fn block_validation_check_rejection_timeout_heuristic() { .wait_for_block_rejections(timeout.as_secs(), &[all_signers[19]]) .unwrap(); - thread::sleep(Duration::from_secs(3)); - - assert_eq!(BLOCK_REJECTIONS_CURRENT_TIMEOUT.get().as_secs(), 123); + wait_for(60, || { + Ok(signer_test + .running_nodes + .counters + .naka_miner_current_rejections + .get() + >= 1) + }) + .unwrap(); + assert_eq!( + signer_test + .running_nodes + .counters + .naka_miner_current_rejections_timeout_secs + .get(), + 123 + ); info!("------------------------- Check Rejections-based timeout with 2 rejections -------------------------"); @@ -7899,9 +7912,23 @@ fn block_validation_check_rejection_timeout_heuristic() { .wait_for_block_rejections(timeout.as_secs(), &[all_signers[18], all_signers[19]]) .unwrap(); - thread::sleep(Duration::from_secs(3)); - - assert_eq!(BLOCK_REJECTIONS_CURRENT_TIMEOUT.get().as_secs(), 20); + wait_for(60, || { + Ok(signer_test + .running_nodes + .counters + .naka_miner_current_rejections + .get() + >= 2) + }) + .unwrap(); + assert_eq!( + signer_test + .running_nodes + .counters + .naka_miner_current_rejections_timeout_secs + .get(), + 20 + ); 
info!("------------------------- Check Rejections-based timeout with 3 rejections -------------------------"); @@ -7924,9 +7951,24 @@ fn block_validation_check_rejection_timeout_heuristic() { ) .unwrap(); - thread::sleep(Duration::from_secs(3)); + wait_for(60, || { + Ok(signer_test + .running_nodes + .counters + .naka_miner_current_rejections + .get() + >= 3) + }) + .unwrap(); - assert_eq!(BLOCK_REJECTIONS_CURRENT_TIMEOUT.get().as_secs(), 10); + assert_eq!( + signer_test + .running_nodes + .counters + .naka_miner_current_rejections_timeout_secs + .get(), + 10 + ); info!("------------------------- Check Rejections-based timeout with 4 rejections -------------------------"); @@ -7959,9 +8001,23 @@ fn block_validation_check_rejection_timeout_heuristic() { ) .unwrap(); - thread::sleep(Duration::from_secs(3)); - - assert_eq!(BLOCK_REJECTIONS_CURRENT_TIMEOUT.get().as_secs(), 99); + wait_for(60, || { + Ok(signer_test + .running_nodes + .counters + .naka_miner_current_rejections + .get() + >= 4) + }) + .unwrap(); + assert_eq!( + signer_test + .running_nodes + .counters + .naka_miner_current_rejections_timeout_secs + .get(), + 99 + ); // reset reject/ignore TEST_REJECT_ALL_BLOCK_PROPOSAL.set(vec![]); From 57f80fb79785e0994e565fb954e3ce628a9b3378 Mon Sep 17 00:00:00 2001 From: Roberto De Ioris Date: Thu, 30 Jan 2025 10:57:46 +0000 Subject: [PATCH 50/82] restored original wait_for_block_status --- .../src/nakamoto_node/signer_coordinator.rs | 18 ++++++++++--- .../src/nakamoto_node/stackerdb_listener.rs | 26 +++++++++---------- 2 files changed, 26 insertions(+), 18 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs index 4132bb4284..8a05600771 100644 --- a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs @@ -331,12 +331,22 @@ impl SignerCoordinator { // Based on the amount of rejections, eventually modify 
the timeout. let block_status = match self.stackerdb_comms.wait_for_block_status( block_signer_sighash, - &mut block_status_tracker, - rejections_timer, - *rejections_timeout, EVENT_RECEIVER_POLL, + |status| { + if rejections_timer.elapsed() > *rejections_timeout { + return false; + } + if *status != block_status_tracker { + return false; + } + return true; + }, )? { - Some(status) => status, + Some(status) => { + // keep track of the last status + block_status_tracker = status.clone(); + status + } None => { // If we just received a timeout, we should check if the burnchain // tip has changed or if we received this signed block already in diff --git a/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs b/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs index 92688c0075..5ac31c60d1 100644 --- a/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs +++ b/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs @@ -480,32 +480,30 @@ impl StackerDBListenerComms { /// Get the status for `block` from the Stacker DB listener. /// If the block is not found in the map, return an error. - /// If the block is found, return it. + /// If the block is found, call `condition` to check if the block status + /// satisfies the condition. + /// If the condition is satisfied, return the block status as + /// `Ok(Some(status))`. + /// If the condition is not satisfied, wait for it to be satisfied. /// If the timeout is reached, return `Ok(None)`. 
- pub fn wait_for_block_status( + pub fn wait_for_block_status( &self, block_signer_sighash: &Sha512Trunc256Sum, - block_status_tracker: &mut BlockStatus, - rejections_timer: std::time::Instant, - rejections_timeout: Duration, timeout: Duration, - ) -> Result, NakamotoNodeError> { + condition: F, + ) -> Result, NakamotoNodeError> + where + F: Fn(&BlockStatus) -> bool, + { let (lock, cvar) = &*self.blocks; let blocks = lock.lock().expect("FATAL: failed to lock block status"); let (guard, timeout_result) = cvar .wait_timeout_while(blocks, timeout, |map| { - if rejections_timer.elapsed() > rejections_timeout { - return true; - } let Some(status) = map.get(block_signer_sighash) else { return true; }; - if status != block_status_tracker { - *block_status_tracker = status.clone(); - return false; - } - return true; + condition(status) }) .expect("FATAL: failed to wait on block status cond var"); From d5abfdc0daa73ba8aaf9b67b1bb61c9f0cc17cd4 Mon Sep 17 00:00:00 2001 From: Roberto De Ioris Date: Thu, 30 Jan 2025 11:00:53 +0000 Subject: [PATCH 51/82] removed empty line --- testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs b/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs index 5ac31c60d1..ddaa14a8fd 100644 --- a/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs +++ b/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs @@ -376,7 +376,6 @@ impl StackerDBListener { } }; block.responded_signers.insert(rejected_pubkey); - block.total_reject_weight = block .total_reject_weight .checked_add(signer_entry.weight) From 5269714c1438257d9a32aa57f2c84542864f3fed Mon Sep 17 00:00:00 2001 From: Roberto De Ioris Date: Thu, 30 Jan 2025 11:06:20 +0000 Subject: [PATCH 52/82] updated CHANGELOG --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 226f7b5159..7d9ecb3b16 100644 --- a/CHANGELOG.md +++ 
b/CHANGELOG.md @@ -10,6 +10,7 @@ and this project adheres to the versioning scheme outlined in the [README.md](RE ### Changed - Miner will include other transactions in blocks with tenure extend transactions (#5760) +- Add `block_rejection_timeout_steps` to miner configuration for defining rejections-based timeouts while waiting for signers response ## [3.1.0.0.4] From 5a70c4397446a873522a2f3bb2bfbc8b389a6f93 Mon Sep 17 00:00:00 2001 From: Roberto De Ioris Date: Thu, 30 Jan 2025 11:07:00 +0000 Subject: [PATCH 53/82] updated CHANGELOG --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 7d9ecb3b16..57f940fa31 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -10,7 +10,7 @@ and this project adheres to the versioning scheme outlined in the [README.md](RE ### Changed - Miner will include other transactions in blocks with tenure extend transactions (#5760) -- Add `block_rejection_timeout_steps` to miner configuration for defining rejections-based timeouts while waiting for signers response +- Add `block_rejection_timeout_steps` to miner configuration for defining rejections-based timeouts while waiting for signers response (#5705) ## [3.1.0.0.4] From c424212db56a076fa482dabf920c45b4a10a6f30 Mon Sep 17 00:00:00 2001 From: Roberto De Ioris Date: Thu, 30 Jan 2025 11:13:17 +0000 Subject: [PATCH 54/82] Secp256k1PrivateKey::random() instead of new() --- testnet/stacks-node/src/tests/signer/v0.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 2c483c523c..2c98c11e67 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -7899,7 +7899,7 @@ fn block_validation_check_rejection_timeout_heuristic() { info!("------------------------- Test Setup -------------------------"); let num_signers = 20; let timeout = Duration::from_secs(30); - let sender_sk =
Secp256k1PrivateKey::new(); + let sender_sk = Secp256k1PrivateKey::random(); let sender_addr = tests::to_addr(&sender_sk); let send_amt = 100; let send_fee = 180; From d71329003e3e697d2e36e9b279bef2d89954f8db Mon Sep 17 00:00:00 2001 From: Roberto De Ioris Date: Thu, 30 Jan 2025 15:30:52 +0000 Subject: [PATCH 55/82] removed useless test attributes --- .../src/nakamoto_node/signer_coordinator.rs | 13 +++++-------- testnet/stacks-node/src/run_loop/neon.rs | 4 +--- 2 files changed, 6 insertions(+), 11 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs index 2de14708f2..ae68c22cc5 100644 --- a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs @@ -410,14 +410,11 @@ impl SignerCoordinator { "rejections_threshold" => self.total_weight.saturating_sub(self.weight_threshold)); rejections_timer = Instant::now(); - #[cfg(test)] - { - Counters::set( - &counters.naka_miner_current_rejections_timeout_secs, - rejections_timeout.as_secs(), - ); - Counters::set(&counters.naka_miner_current_rejections, rejections); - } + Counters::set( + &counters.naka_miner_current_rejections_timeout_secs, + rejections_timeout.as_secs(), + ); + Counters::set(&counters.naka_miner_current_rejections, rejections); } if block_status diff --git a/testnet/stacks-node/src/run_loop/neon.rs b/testnet/stacks-node/src/run_loop/neon.rs index 89973aa9aa..01813b6812 100644 --- a/testnet/stacks-node/src/run_loop/neon.rs +++ b/testnet/stacks-node/src/run_loop/neon.rs @@ -117,9 +117,7 @@ pub struct Counters { pub naka_signer_pushed_blocks: RunLoopCounter, pub naka_miner_directives: RunLoopCounter, - #[cfg(test)] pub naka_miner_current_rejections: RunLoopCounter, - #[cfg(test)] pub naka_miner_current_rejections_timeout_secs: RunLoopCounter, #[cfg(test)] @@ -145,7 +143,7 @@ impl Counters { } #[cfg(not(test))] - fn set(_ctr: &RunLoopCounter, _value: 
u64) {} + pub fn set(_ctr: &RunLoopCounter, _value: u64) {} pub fn bump_blocks_processed(&self) { Counters::inc(&self.blocks_processed); From adaabe60570acb199e577c11c633eb97078af115 Mon Sep 17 00:00:00 2001 From: Roberto De Ioris Date: Thu, 30 Jan 2025 15:47:49 +0000 Subject: [PATCH 56/82] use u32 for block_rejection_timeout_steps keys --- stackslib/src/config/mod.rs | 8 ++++---- .../src/nakamoto_node/signer_coordinator.rs | 20 +++++++++---------- 2 files changed, 14 insertions(+), 14 deletions(-) diff --git a/stackslib/src/config/mod.rs b/stackslib/src/config/mod.rs index 3fa2d3a005..b68e55bd03 100644 --- a/stackslib/src/config/mod.rs +++ b/stackslib/src/config/mod.rs @@ -2156,7 +2156,7 @@ pub struct MinerConfig { /// Duration to wait before attempting to issue a tenure extend pub tenure_timeout: Duration, /// Define the timeout to apply while waiting for signers responses, based on the amount of rejections - pub block_rejection_timeout_steps: HashMap, + pub block_rejection_timeout_steps: HashMap, } impl Default for MinerConfig { @@ -2196,7 +2196,7 @@ impl Default for MinerConfig { tenure_extend_poll_secs: Duration::from_secs(DEFAULT_TENURE_EXTEND_POLL_SECS), tenure_timeout: Duration::from_secs(DEFAULT_TENURE_TIMEOUT_SECS), block_rejection_timeout_steps: { - let mut rejections_timeouts_default_map = HashMap::::new(); + let mut rejections_timeouts_default_map = HashMap::::new(); rejections_timeouts_default_map.insert(0, Duration::from_secs(600)); rejections_timeouts_default_map.insert(10, Duration::from_secs(300)); rejections_timeouts_default_map.insert(20, Duration::from_secs(150)); @@ -2744,9 +2744,9 @@ impl MinerConfigFile { tenure_timeout: self.tenure_timeout_secs.map(Duration::from_secs).unwrap_or(miner_default_config.tenure_timeout), block_rejection_timeout_steps: { if let Some(block_rejection_timeout_items) = self.block_rejection_timeout_steps { - let mut rejection_timeout_durations = HashMap::::new(); + let mut rejection_timeout_durations = 
HashMap::::new(); for (slice, seconds) in block_rejection_timeout_items.iter() { - match slice.parse::() { + match slice.parse::() { Ok(slice_slot) => rejection_timeout_durations.insert(slice_slot, Duration::from_secs(*seconds)), Err(e) => panic!("block_rejection_timeout_steps keys must be unsigned integers: {}", e) }; diff --git a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs index ae68c22cc5..1e2dba7f05 100644 --- a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs @@ -72,7 +72,7 @@ pub struct SignerCoordinator { /// burn block has arrived since this thread started. burn_tip_at_start: ConsensusHash, /// The timeout configuration based on the percentage of rejections - block_rejection_timeout_steps: HashMap, + block_rejection_timeout_steps: HashMap, } impl SignerCoordinator { @@ -305,21 +305,21 @@ impl SignerCoordinator { let mut block_rejection_timeout_steps = BTreeMap::::new(); for (percentage, duration) in self.block_rejection_timeout_steps.iter() { let rejections_amount = - ((self.total_weight as f64 / 100.0) * *percentage as f64) as u64; + ((f64::from(self.total_weight) / 100.0) * f64::from(*percentage)) as u64; block_rejection_timeout_steps.insert(rejections_amount, *duration); } // the amount of current rejections (used to eventually modify the timeout) let mut rejections: u64 = 0; // default timeout (the 0 entry must be always present) - let mut rejections_timeout = self - .block_rejection_timeout_steps - .get(&rejections) - .ok_or_else(|| { - NakamotoNodeError::SigningCoordinatorFailure( - "Invalid rejection timeout step function definition".into(), - ) - })?; + let mut rejections_timeout = + block_rejection_timeout_steps + .get(&rejections) + .ok_or_else(|| { + NakamotoNodeError::SigningCoordinatorFailure( + "Invalid rejection timeout step function definition".into(), + ) + })?; // this is used for 
comparing block_status to identify if it has been changed from the previous event let mut block_status_tracker = BlockStatus::default(); From 7cfbfddb8b0034e40b9fd4edf766655d76260868 Mon Sep 17 00:00:00 2001 From: Roberto De Ioris Date: Thu, 30 Jan 2025 15:54:04 +0000 Subject: [PATCH 57/82] refactored block_rejection_timeout_steps percentage setup --- .../src/nakamoto_node/signer_coordinator.rs | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs index 1e2dba7f05..dfddd0a7b2 100644 --- a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs @@ -279,12 +279,21 @@ impl SignerCoordinator { } } + // build a BTreeMap of the various timeout steps + let mut block_rejection_timeout_steps = BTreeMap::::new(); + for (percentage, duration) in self.block_rejection_timeout_steps.iter() { + let rejections_amount = + ((f64::from(self.total_weight) / 100.0) * f64::from(*percentage)) as u64; + block_rejection_timeout_steps.insert(rejections_amount, *duration); + } + self.get_block_status( &block.header.signer_signature_hash(), &block.block_id(), chain_state, sortdb, counters, + &block_rejection_timeout_steps, ) } @@ -300,15 +309,8 @@ impl SignerCoordinator { chain_state: &mut StacksChainState, sortdb: &SortitionDB, counters: &Counters, + block_rejection_timeout_steps: &BTreeMap, ) -> Result, NakamotoNodeError> { - // build a BTreeMap of the various timeout steps - let mut block_rejection_timeout_steps = BTreeMap::::new(); - for (percentage, duration) in self.block_rejection_timeout_steps.iter() { - let rejections_amount = - ((f64::from(self.total_weight) / 100.0) * f64::from(*percentage)) as u64; - block_rejection_timeout_steps.insert(rejections_amount, *duration); - } - // the amount of current rejections (used to eventually modify the timeout) let mut 
rejections: u64 = 0; // default timeout (the 0 entry must be always present) From d4171d7cce23cee936319a97fe40b965b90470c4 Mon Sep 17 00:00:00 2001 From: Roberto De Ioris Date: Thu, 30 Jan 2025 16:05:10 +0000 Subject: [PATCH 58/82] do not reset rejections_timer on block_status updates --- testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs index dfddd0a7b2..eb213710e2 100644 --- a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs @@ -326,7 +326,7 @@ impl SignerCoordinator { let mut block_status_tracker = BlockStatus::default(); // this is used to track the start of the waiting cycle - let mut rejections_timer = Instant::now(); + let rejections_timer = Instant::now(); loop { // At every iteration wait for the block_status. 
// Exit when the amount of confirmations/rejections reaches the threshold (or until timeout) @@ -410,7 +410,6 @@ impl SignerCoordinator { "rejections" => rejections, "rejections_timeout" => rejections_timeout.as_secs(), "rejections_threshold" => self.total_weight.saturating_sub(self.weight_threshold)); - rejections_timer = Instant::now(); Counters::set( &counters.naka_miner_current_rejections_timeout_secs, From d380366b33c668c701bdddb86e4bf840cb9fb61e Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Thu, 30 Jan 2025 11:50:07 -0500 Subject: [PATCH 59/82] chore: Update `syn` version --- Cargo.lock | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.lock b/Cargo.lock index e56b89f8d7..9a39c4c10b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2834,7 +2834,7 @@ checksum = "5d69265a08751de7844521fd15003ae0a888e035773ba05695c5c759a6f89eef" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.58", ] [[package]] From fbc3b7d34124acbc2b3d9ced95dacf5a475c22b5 Mon Sep 17 00:00:00 2001 From: Roberto De Ioris Date: Thu, 30 Jan 2025 17:38:08 +0000 Subject: [PATCH 60/82] moved block_rejection_timeout_steps computation directly in SignerCoordinator::new() --- .../src/nakamoto_node/signer_coordinator.rs | 43 +++++++++---------- 1 file changed, 21 insertions(+), 22 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs index eb213710e2..1fdb609ad1 100644 --- a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs @@ -13,7 +13,7 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
-use std::collections::{BTreeMap, HashMap}; +use std::collections::BTreeMap; use std::ops::Bound::Included; use std::sync::atomic::AtomicBool; use std::sync::{Arc, Mutex}; @@ -72,7 +72,7 @@ pub struct SignerCoordinator { /// burn block has arrived since this thread started. burn_tip_at_start: ConsensusHash, /// The timeout configuration based on the percentage of rejections - block_rejection_timeout_steps: HashMap, + block_rejection_timeout_steps: BTreeMap, } impl SignerCoordinator { @@ -108,6 +108,14 @@ impl SignerCoordinator { let miners_contract_id = boot_code_id(MINERS_NAME, is_mainnet); let miners_session = StackerDBSession::new(&rpc_socket.to_string(), miners_contract_id); + // build a BTreeMap of the various timeout steps + let mut block_rejection_timeout_steps = BTreeMap::::new(); + for (percentage, duration) in config.miner.block_rejection_timeout_steps.iter() { + let rejections_amount = + ((f64::from(listener.total_weight) / 100.0) * f64::from(*percentage)) as u64; + block_rejection_timeout_steps.insert(rejections_amount, *duration); + } + let mut sc = Self { message_key, is_mainnet, @@ -118,7 +126,7 @@ impl SignerCoordinator { keep_running, listener_thread: None, burn_tip_at_start: burn_tip_at_start.clone(), - block_rejection_timeout_steps: config.miner.block_rejection_timeout_steps.clone(), + block_rejection_timeout_steps, }; // Spawn the signer DB listener thread @@ -279,21 +287,12 @@ impl SignerCoordinator { } } - // build a BTreeMap of the various timeout steps - let mut block_rejection_timeout_steps = BTreeMap::::new(); - for (percentage, duration) in self.block_rejection_timeout_steps.iter() { - let rejections_amount = - ((f64::from(self.total_weight) / 100.0) * f64::from(*percentage)) as u64; - block_rejection_timeout_steps.insert(rejections_amount, *duration); - } - self.get_block_status( &block.header.signer_signature_hash(), &block.block_id(), chain_state, sortdb, counters, - &block_rejection_timeout_steps, ) } @@ -309,19 +308,18 @@ impl 
SignerCoordinator { chain_state: &mut StacksChainState, sortdb: &SortitionDB, counters: &Counters, - block_rejection_timeout_steps: &BTreeMap, ) -> Result, NakamotoNodeError> { // the amount of current rejections (used to eventually modify the timeout) let mut rejections: u64 = 0; // default timeout (the 0 entry must be always present) - let mut rejections_timeout = - block_rejection_timeout_steps - .get(&rejections) - .ok_or_else(|| { - NakamotoNodeError::SigningCoordinatorFailure( - "Invalid rejection timeout step function definition".into(), - ) - })?; + let mut rejections_timeout = self + .block_rejection_timeout_steps + .get(&rejections) + .ok_or_else(|| { + NakamotoNodeError::SigningCoordinatorFailure( + "Invalid rejection timeout step function definition".into(), + ) + })?; // this is used for comparing block_status to identify if it has been changed from the previous event let mut block_status_tracker = BlockStatus::default(); @@ -397,7 +395,8 @@ impl SignerCoordinator { if rejections != block_status.total_reject_weight as u64 { rejections = block_status.total_reject_weight as u64; - let rejections_timeout_tuple = block_rejection_timeout_steps + let rejections_timeout_tuple = self + .block_rejection_timeout_steps .range((Included(0), Included(rejections))) .last() .ok_or_else(|| { From 790c1e1db2bc85accccb4ba0b1fa500de2a30fdb Mon Sep 17 00:00:00 2001 From: Roberto De Ioris Date: Fri, 31 Jan 2025 10:39:36 +0000 Subject: [PATCH 61/82] merged with develop, reintroduces old logic for condition variables, improved rejections comparison --- .../src/nakamoto_node/signer_coordinator.rs | 19 +++++++------------ .../src/nakamoto_node/stackerdb_listener.rs | 16 ++++++++++++---- testnet/stacks-node/src/tests/signer/mod.rs | 4 ---- 3 files changed, 19 insertions(+), 20 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs index 1fdb609ad1..ec827033ba 100644 --- 
a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs @@ -40,9 +40,7 @@ use stacks::util_lib::boot::boot_code_id; use super::stackerdb_listener::StackerDBListenerComms; use super::Error as NakamotoNodeError; use crate::event_dispatcher::StackerDBChannel; -use crate::nakamoto_node::stackerdb_listener::{ - BlockStatus, StackerDBListener, EVENT_RECEIVER_POLL, -}; +use crate::nakamoto_node::stackerdb_listener::{StackerDBListener, EVENT_RECEIVER_POLL}; use crate::neon::Counters; use crate::Config; @@ -320,8 +318,6 @@ impl SignerCoordinator { "Invalid rejection timeout step function definition".into(), ) })?; - // this is used for comparing block_status to identify if it has been changed from the previous event - let mut block_status_tracker = BlockStatus::default(); // this is used to track the start of the waiting cycle let rejections_timer = Instant::now(); @@ -333,20 +329,19 @@ impl SignerCoordinator { block_signer_sighash, EVENT_RECEIVER_POLL, |status| { + // rejections-based timeout expired? if rejections_timer.elapsed() > *rejections_timeout { return false; } - if *status != block_status_tracker { + // number or rejections changed? + if status.total_reject_weight as u64 != rejections { return false; } - return true; + // enough signatures? + return status.total_weight_signed < self.weight_threshold; }, )? 
{ - Some(status) => { - // keep track of the last status - block_status_tracker = status.clone(); - status - } + Some(status) => status, None => { // If we just received a timeout, we should check if the burnchain // tip has changed or if we received this signed block already in diff --git a/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs b/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs index ddaa14a8fd..cf9ce4b6d7 100644 --- a/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs +++ b/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs @@ -337,8 +337,10 @@ impl StackerDBListener { block.gathered_signatures.insert(slot_id, signature); block.responded_signers.insert(signer_pubkey); - // Signal to anyone waiting on this block that we have a new status - cvar.notify_all(); + if block.total_weight_signed >= self.weight_threshold { + // Signal to anyone waiting on this block that we have enough signatures + cvar.notify_all(); + } // Update the idle timestamp for this signer self.update_idle_timestamp( @@ -394,8 +396,14 @@ impl StackerDBListener { "server_version" => rejected_data.metadata.server_version, ); - // Signal to anyone waiting on this block that we have a new status - cvar.notify_all(); + if block + .total_reject_weight + .saturating_add(self.weight_threshold) + > self.total_weight + { + // Signal to anyone waiting on this block that we have enough rejections + cvar.notify_all(); + } // Update the idle timestamp for this signer self.update_idle_timestamp( diff --git a/testnet/stacks-node/src/tests/signer/mod.rs b/testnet/stacks-node/src/tests/signer/mod.rs index 836a9eee0a..a68a4c77fb 100644 --- a/testnet/stacks-node/src/tests/signer/mod.rs +++ b/testnet/stacks-node/src/tests/signer/mod.rs @@ -86,7 +86,6 @@ pub struct RunningNodes { pub counters: Counters, pub coord_channel: Arc>, pub conf: NeonConfig, - pub counters: Counters, } /// A test harness for running a v0 or v1 signer integration test @@ -939,8 +938,6 @@ fn 
setup_stx_btc_node( let coord_channel = run_loop.coordinator_channels(); - let run_loop_counters = run_loop.counters(); - let run_loop_thread = thread::spawn(move || run_loop.start(None, 0)); // Give the run loop some time to start up! @@ -978,6 +975,5 @@ fn setup_stx_btc_node( coord_channel, counters, conf: naka_conf, - counters: run_loop_counters, } } From 54aed16067a48c71dada0cd1fa8067d0866b143a Mon Sep 17 00:00:00 2001 From: Roberto De Ioris Date: Fri, 31 Jan 2025 10:41:03 +0000 Subject: [PATCH 62/82] rollback BlockStatus Default and PartialEq --- testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs b/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs index cf9ce4b6d7..834c59fa95 100644 --- a/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs +++ b/testnet/stacks-node/src/nakamoto_node/stackerdb_listener.rs @@ -50,7 +50,7 @@ pub static TEST_IGNORE_SIGNERS: LazyLock> = LazyLock::new(TestFla /// waking up to check timeouts? 
pub static EVENT_RECEIVER_POLL: Duration = Duration::from_millis(500); -#[derive(Debug, Clone, Default, PartialEq)] +#[derive(Debug, Clone)] pub struct BlockStatus { pub responded_signers: HashSet, pub gathered_signatures: BTreeMap, From fd09625cd4febaa716da8e024ea1cb0d08aa2352 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Fri, 31 Jan 2025 07:40:26 -0600 Subject: [PATCH 63/82] test: fix flake in multiple_miners_empty_sortition --- testnet/stacks-node/src/tests/signer/v0.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 2e71cb8910..30b61477cd 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -11737,13 +11737,17 @@ fn multiple_miners_empty_sortition() { // lets mine a btc flash block let rl2_commits_before = rl2_commits.load(Ordering::SeqCst); let rl1_commits_before = rl1_commits.load(Ordering::SeqCst); + let info_before = get_chain_info(&conf); + signer_test .running_nodes .btc_regtest_controller .build_next_block(2); wait_for(60, || { - Ok(rl2_commits.load(Ordering::SeqCst) > rl2_commits_before + let info = get_chain_info(&conf); + Ok(info.burn_block_height >= 2 + info_before.burn_block_height + && rl2_commits.load(Ordering::SeqCst) > rl2_commits_before && rl1_commits.load(Ordering::SeqCst) > rl1_commits_before) }) .unwrap(); From 3ea5c858fa3d089095cd45bf737141c4e6c646c5 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Fri, 31 Jan 2025 09:15:06 -0600 Subject: [PATCH 64/82] test: refactor nakamoto tests to use better wait-for-commit logic --- .../src/tests/nakamoto_integrations.rs | 374 +++++------------- testnet/stacks-node/src/tests/signer/mod.rs | 13 +- testnet/stacks-node/src/tests/signer/v0.rs | 49 ++- 3 files changed, 120 insertions(+), 316 deletions(-) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs 
b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 71775742d0..0c60902fa0 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -728,14 +728,14 @@ pub fn next_block_and_process_new_stacks_block( pub fn next_block_and_mine_commit( btc_controller: &mut BitcoinRegtestController, timeout_secs: u64, - coord_channels: &Arc>, - commits_submitted: &Arc, + node_conf: &Config, + node_counters: &Counters, ) -> Result<(), String> { next_block_and_wait_for_commits( btc_controller, timeout_secs, - &[coord_channels], - &[commits_submitted], + &[node_conf], + &[node_counters], true, ) } @@ -745,14 +745,14 @@ pub fn next_block_and_mine_commit( pub fn next_block_and_commits_only( btc_controller: &mut BitcoinRegtestController, timeout_secs: u64, - coord_channels: &Arc>, - commits_submitted: &Arc, + node_conf: &Config, + node_counters: &Counters, ) -> Result<(), String> { next_block_and_wait_for_commits( btc_controller, timeout_secs, - &[coord_channels], - &[commits_submitted], + &[node_conf], + &[node_counters], false, ) } @@ -765,98 +765,48 @@ pub fn next_block_and_commits_only( pub fn next_block_and_wait_for_commits( btc_controller: &mut BitcoinRegtestController, timeout_secs: u64, - coord_channels: &[&Arc>], - commits_submitted: &[&Arc], + node_confs: &[&Config], + node_counters: &[&Counters], wait_for_stacks_block: bool, ) -> Result<(), String> { - let commits_submitted: Vec<_> = commits_submitted.to_vec(); - let blocks_processed_before: Vec<_> = coord_channels + let infos_before: Vec<_> = node_confs.iter().map(|c| get_chain_info(c)).collect(); + let burn_ht_before = infos_before .iter() - .map(|x| { - x.lock() - .expect("Mutex poisoned") - .get_stacks_blocks_processed() - }) + .map(|info| info.burn_block_height) + .max() + .unwrap(); + let stacks_ht_before = infos_before + .iter() + .map(|info| info.stacks_tip_height) + .max() + .unwrap(); + let last_commit_burn_hts: Vec<_> = node_counters 
+ .iter() + .map(|c| &c.naka_submitted_commit_last_burn_height) .collect(); - let commits_before: Vec<_> = commits_submitted + let last_commit_stacks_hts: Vec<_> = node_counters .iter() - .map(|x| x.load(Ordering::SeqCst)) + .map(|c| &c.naka_submitted_commit_last_stacks_tip) .collect(); - let mut block_processed_time: Vec> = vec![None; commits_before.len()]; - let mut commit_sent_time: Vec> = vec![None; commits_before.len()]; next_block_and(btc_controller, timeout_secs, || { - for i in 0..commits_submitted.len() { - let commits_sent = commits_submitted[i].load(Ordering::SeqCst); - let blocks_processed = coord_channels[i] - .lock() - .expect("Mutex poisoned") - .get_stacks_blocks_processed(); - let now = Instant::now(); - if blocks_processed > blocks_processed_before[i] && block_processed_time[i].is_none() { - block_processed_time[i].replace(now); - } - if commits_sent > commits_before[i] && commit_sent_time[i].is_none() { - commit_sent_time[i].replace(now); - } - } - + let burn_height_committed_to = last_commit_burn_hts.iter().all(|last_commit_burn_height| { + last_commit_burn_height.load(Ordering::SeqCst) > burn_ht_before + }); if !wait_for_stacks_block { - for i in 0..commits_submitted.len() { - // just wait for the commit - let commits_sent = commits_submitted[i].load(Ordering::SeqCst); - if commits_sent <= commits_before[i] { - return Ok(false); - } - - // if two commits have been sent, one of them must have been after - if commits_sent >= commits_before[i] + 1 { - continue; - } - return Ok(false); - } - return Ok(true); - } - - // waiting for both commit and stacks block - for i in 0..commits_submitted.len() { - let blocks_processed = coord_channels[i] - .lock() - .expect("Mutex poisoned") - .get_stacks_blocks_processed(); - let commits_sent = commits_submitted[i].load(Ordering::SeqCst); - - if blocks_processed > blocks_processed_before[i] { - // either we don't care about the stacks block count, or the block count advanced. - // Check the block-commits. 
- let block_processed_time = block_processed_time[i] - .as_ref() - .ok_or("TEST-ERROR: Processed block time wasn't set")?; - if commits_sent <= commits_before[i] { - return Ok(false); - } - let commit_sent_time = commit_sent_time[i] - .as_ref() - .ok_or("TEST-ERROR: Processed commit time wasn't set")?; - // try to ensure the commit was sent after the block was processed - if commit_sent_time > block_processed_time { - continue; - } - // if two commits have been sent, one of them must have been after - if commits_sent >= commits_before[i] + 2 { - continue; - } - // otherwise, just timeout if the commit was sent and its been long enough - // for a new commit pass to have occurred - if block_processed_time.elapsed() > Duration::from_secs(10) { - continue; - } - return Ok(false); - } else { + Ok(burn_height_committed_to) + } else { + if !burn_height_committed_to { return Ok(false); } + let stacks_tip_committed_to = + last_commit_stacks_hts + .iter() + .all(|last_commit_stacks_height| { + last_commit_stacks_height.load(Ordering::SeqCst) > stacks_ht_before + }); + return Ok(stacks_tip_committed_to); } - Ok(true) }) } @@ -1541,6 +1491,7 @@ fn simple_neon_integration() { naka_proposed_blocks: proposals_submitted, .. 
} = run_loop.counters(); + let node_counters = run_loop.counters(); let coord_channel = run_loop.coordinator_channels(); @@ -1598,13 +1549,8 @@ fn simple_neon_integration() { // Mine 15 nakamoto tenures for _i in 0..15 { - next_block_and_mine_commit( - &mut btc_regtest_controller, - 60, - &coord_channel, - &commits_submitted, - ) - .unwrap(); + next_block_and_mine_commit(&mut btc_regtest_controller, 60, &naka_conf, &node_counters) + .unwrap(); } // Submit a TX @@ -1652,13 +1598,8 @@ fn simple_neon_integration() { // Mine 15 more nakamoto tenures for _i in 0..15 { - next_block_and_mine_commit( - &mut btc_regtest_controller, - 60, - &coord_channel, - &commits_submitted, - ) - .unwrap(); + next_block_and_mine_commit(&mut btc_regtest_controller, 60, &naka_conf, &node_counters) + .unwrap(); } // load the chain tip, and assert that it is a nakamoto block and at least 30 blocks have advanced in epoch 3 @@ -1794,17 +1735,17 @@ fn restarting_miner() { naka_proposed_blocks: proposals_submitted, .. } = run_loop.counters(); + let rl1_counters = run_loop.counters(); let coord_channel = run_loop.coordinator_channels(); let mut run_loop_2 = boot_nakamoto::BootRunLoop::new(naka_conf.clone()).unwrap(); let _run_loop_2_stopper = run_loop.get_termination_switch(); let Counters { blocks_processed: blocks_processed_2, - naka_submitted_commits: commits_submitted_2, naka_proposed_blocks: proposals_submitted_2, .. 
} = run_loop_2.counters(); - let coord_channel_2 = run_loop_2.coordinator_channels(); + let rl2_counters = run_loop.counters(); let run_loop_thread = thread::spawn(move || run_loop.start(None, 0)); wait_for_runloop(&blocks_processed); @@ -1846,13 +1787,8 @@ fn restarting_miner() { // Mine 2 nakamoto tenures for _i in 0..2 { - next_block_and_mine_commit( - &mut btc_regtest_controller, - 60, - &coord_channel, - &commits_submitted, - ) - .unwrap(); + next_block_and_mine_commit(&mut btc_regtest_controller, 60, &naka_conf, &rl1_counters) + .unwrap(); } let last_tip = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) @@ -1915,13 +1851,8 @@ fn restarting_miner() { // Mine 2 more nakamoto tenures for _i in 0..2 { - next_block_and_mine_commit( - &mut btc_regtest_controller, - 60, - &coord_channel_2, - &commits_submitted_2, - ) - .unwrap(); + next_block_and_mine_commit(&mut btc_regtest_controller, 60, &naka_conf, &rl2_counters) + .unwrap(); } // load the chain tip, and assert that it is a nakamoto block and at least 30 blocks have advanced in epoch 3 @@ -2020,6 +1951,7 @@ fn flash_blocks_on_epoch_3() { naka_proposed_blocks: proposals_submitted, .. 
} = run_loop.counters(); + let counters = run_loop.counters(); let coord_channel = run_loop.coordinator_channels(); @@ -2094,13 +2026,7 @@ fn flash_blocks_on_epoch_3() { // Mine 15 nakamoto tenures for _i in 0..15 { - next_block_and_mine_commit( - &mut btc_regtest_controller, - 60, - &coord_channel, - &commits_submitted, - ) - .unwrap(); + next_block_and_mine_commit(&mut btc_regtest_controller, 60, &naka_conf, &counters).unwrap(); } // Submit a TX @@ -2136,13 +2062,7 @@ fn flash_blocks_on_epoch_3() { // Mine 15 more nakamoto tenures for _i in 0..15 { - next_block_and_mine_commit( - &mut btc_regtest_controller, - 60, - &coord_channel, - &commits_submitted, - ) - .unwrap(); + next_block_and_mine_commit(&mut btc_regtest_controller, 60, &naka_conf, &counters).unwrap(); } // load the chain tip, and assert that it is a nakamoto block and at least 30 blocks have advanced in epoch 3 @@ -2719,6 +2639,7 @@ fn correct_burn_outs() { naka_proposed_blocks: proposals_submitted, .. } = run_loop.counters(); + let counters = run_loop.counters(); let coord_channel = run_loop.coordinator_channels(); @@ -2891,12 +2812,9 @@ fn correct_burn_outs() { let prior_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) .unwrap() .block_height; - if let Err(e) = next_block_and_mine_commit( - &mut btc_regtest_controller, - 30, - &coord_channel, - &commits_submitted, - ) { + if let Err(e) = + next_block_and_mine_commit(&mut btc_regtest_controller, 30, &naka_conf, &counters) + { warn!( "Error while minting a bitcoin block and waiting for stacks-node activity: {e:?}" ); @@ -3036,6 +2954,7 @@ fn block_proposal_api_endpoint() { naka_proposed_blocks: proposals_submitted, .. 
} = run_loop.counters(); + let counters = run_loop.counters(); let coord_channel = run_loop.coordinator_channels(); @@ -3076,13 +2995,7 @@ fn block_proposal_api_endpoint() { // Mine 3 nakamoto tenures for _ in 0..3 { - next_block_and_mine_commit( - &mut btc_regtest_controller, - 60, - &coord_channel, - &commits_submitted, - ) - .unwrap(); + next_block_and_mine_commit(&mut btc_regtest_controller, 60, &conf, &counters).unwrap(); } // TODO (hack) instantiate the sortdb in the burnchain @@ -3414,6 +3327,7 @@ fn miner_writes_proposed_block_to_stackerdb() { naka_proposed_blocks: proposals_submitted, .. } = run_loop.counters(); + let counters = run_loop.counters(); let coord_channel = run_loop.coordinator_channels(); @@ -3434,13 +3348,7 @@ fn miner_writes_proposed_block_to_stackerdb() { wait_for_first_naka_block_commit(60, &commits_submitted); // Mine 1 nakamoto tenure - next_block_and_mine_commit( - &mut btc_regtest_controller, - 60, - &coord_channel, - &commits_submitted, - ) - .unwrap(); + next_block_and_mine_commit(&mut btc_regtest_controller, 60, &naka_conf, &counters).unwrap(); let sortdb = naka_conf.get_burnchain().open_sortition_db(true).unwrap(); @@ -3525,6 +3433,7 @@ fn vote_for_aggregate_key_burn_op() { naka_proposed_blocks: proposals_submitted, .. 
} = run_loop.counters(); + let counters = run_loop.counters(); let coord_channel = run_loop.coordinator_channels(); @@ -3607,13 +3516,7 @@ fn vote_for_aggregate_key_burn_op() { ); for _i in 0..(blocks_until_prepare) { - next_block_and_mine_commit( - &mut btc_regtest_controller, - 60, - &coord_channel, - &commits_submitted, - ) - .unwrap(); + next_block_and_mine_commit(&mut btc_regtest_controller, 60, &naka_conf, &counters).unwrap(); } let reward_cycle = reward_cycle + 1; @@ -3663,13 +3566,7 @@ fn vote_for_aggregate_key_burn_op() { // the second block should process the vote, after which the vote should be set for _i in 0..2 { - next_block_and_mine_commit( - &mut btc_regtest_controller, - 60, - &coord_channel, - &commits_submitted, - ) - .unwrap(); + next_block_and_mine_commit(&mut btc_regtest_controller, 60, &naka_conf, &counters).unwrap(); } let mut vote_for_aggregate_key_found = false; @@ -4631,6 +4528,7 @@ fn burn_ops_integration_test() { naka_proposed_blocks: proposals_submitted, .. 
} = run_loop.counters(); + let counters = run_loop.counters(); let coord_channel = run_loop.coordinator_channels(); @@ -4682,13 +4580,7 @@ fn burn_ops_integration_test() { "Pre-stx operation should submit successfully" ); - next_block_and_mine_commit( - &mut btc_regtest_controller, - 60, - &coord_channel, - &commits_submitted, - ) - .unwrap(); + next_block_and_mine_commit(&mut btc_regtest_controller, 60, &naka_conf, &counters).unwrap(); let mut miner_signer_2 = Keychain::default(naka_conf.node.seed.clone()).generate_op_signer(); info!("Submitting second pre-stx op"); @@ -4816,13 +4708,7 @@ fn burn_ops_integration_test() { ); for _i in 0..(blocks_until_prepare) { - next_block_and_mine_commit( - &mut btc_regtest_controller, - 60, - &coord_channel, - &commits_submitted, - ) - .unwrap(); + next_block_and_mine_commit(&mut btc_regtest_controller, 60, &naka_conf, &counters).unwrap(); } let reward_cycle = reward_cycle + 1; @@ -4979,13 +4865,7 @@ fn burn_ops_integration_test() { // the second block should process the ops // Also mine 2 interim blocks to ensure the stack-stx ops are not processed in them for _i in 0..2 { - next_block_and_mine_commit( - &mut btc_regtest_controller, - 60, - &coord_channel, - &commits_submitted, - ) - .unwrap(); + next_block_and_mine_commit(&mut btc_regtest_controller, 60, &naka_conf, &counters).unwrap(); for interim_block_ix in 0..2 { info!("Mining interim block {interim_block_ix}"); let blocks_processed_before = coord_channel @@ -6032,6 +5912,7 @@ fn nakamoto_attempt_time() { naka_proposed_blocks: proposals_submitted, .. 
} = run_loop.counters(); + let counters = run_loop.counters(); let coord_channel = run_loop.coordinator_channels(); @@ -6071,13 +5952,7 @@ fn nakamoto_attempt_time() { // Mine 3 nakamoto tenures for _ in 0..3 { - next_block_and_mine_commit( - &mut btc_regtest_controller, - 60, - &coord_channel, - &commits_submitted, - ) - .unwrap(); + next_block_and_mine_commit(&mut btc_regtest_controller, 60, &naka_conf, &counters).unwrap(); } // TODO (hack) instantiate the sortdb in the burnchain @@ -6603,6 +6478,7 @@ fn signer_chainstate() { naka_proposed_blocks: proposals_submitted, .. } = run_loop.counters(); + let counters = run_loop.counters(); let coord_channel = run_loop.coordinator_channels(); @@ -6680,13 +6556,7 @@ fn signer_chainstate() { // hold the first and last blocks of the first tenure. we'll use this to submit reorging proposals let mut first_tenure_blocks: Option> = None; for i in 0..15 { - next_block_and_mine_commit( - &mut btc_regtest_controller, - 60, - &coord_channel, - &commits_submitted, - ) - .unwrap(); + next_block_and_mine_commit(&mut btc_regtest_controller, 60, &naka_conf, &counters).unwrap(); // this config disallows any reorg due to poorly timed block commits let proposal_conf = ProposalEvalConfig { @@ -7197,6 +7067,7 @@ fn continue_tenure_extend() { naka_skip_commit_op: test_skip_commit_op, .. } = run_loop.counters(); + let counters = run_loop.counters(); let coord_channel = run_loop.coordinator_channels(); @@ -7258,13 +7129,7 @@ fn continue_tenure_extend() { wait_for_first_naka_block_commit(60, &commits_submitted); // Mine a regular nakamoto tenure - next_block_and_mine_commit( - &mut btc_regtest_controller, - 60, - &coord_channel, - &commits_submitted, - ) - .unwrap(); + next_block_and_mine_commit(&mut btc_regtest_controller, 60, &naka_conf, &counters).unwrap(); wait_for(5, || { let blocks_processed = coord_channel @@ -8728,6 +8593,7 @@ fn check_block_info_rewards() { naka_proposed_blocks: proposals_submitted, .. 
} = run_loop.counters(); + let counters = run_loop.counters(); let coord_channel = run_loop.coordinator_channels(); @@ -8926,13 +8792,7 @@ fn check_block_info_rewards() { // (only 2 blocks maturation time in tests) info!("Mining 6 tenures to mature the block reward"); for i in 0..6 { - next_block_and_mine_commit( - &mut btc_regtest_controller, - 20, - &coord_channel, - &commits_submitted, - ) - .unwrap(); + next_block_and_mine_commit(&mut btc_regtest_controller, 20, &naka_conf, &counters).unwrap(); info!("Mined a block ({i})"); } @@ -9498,10 +9358,10 @@ fn v3_signer_api_endpoint() { let run_loop_stopper = run_loop.get_termination_switch(); let Counters { blocks_processed, - naka_submitted_commits: commits_submitted, naka_proposed_blocks: proposals_submitted, .. } = run_loop.counters(); + let counters = run_loop.counters(); let coord_channel = run_loop.coordinator_channels(); @@ -9562,13 +9422,7 @@ fn v3_signer_api_endpoint() { // Mine some nakamoto tenures for _i in 0..naka_tenures { - next_block_and_mine_commit( - &mut btc_regtest_controller, - 60, - &coord_channel, - &commits_submitted, - ) - .unwrap(); + next_block_and_mine_commit(&mut btc_regtest_controller, 60, &conf, &counters).unwrap(); } let block_height = btc_regtest_controller.get_headers_height(); let reward_cycle = btc_regtest_controller @@ -9674,7 +9528,7 @@ fn v3_blockbyheight_api_endpoint() { naka_proposed_blocks: proposals_submitted, .. 
} = run_loop.counters(); - + let counters = run_loop.counters(); let coord_channel = run_loop.coordinator_channels(); let run_loop_thread = thread::spawn(move || run_loop.start(None, 0)); @@ -9696,13 +9550,7 @@ fn v3_blockbyheight_api_endpoint() { wait_for_first_naka_block_commit(60, &commits_submitted); // Mine 1 nakamoto tenure - next_block_and_mine_commit( - &mut btc_regtest_controller, - 60, - &coord_channel, - &commits_submitted, - ) - .unwrap(); + next_block_and_mine_commit(&mut btc_regtest_controller, 60, &conf, &counters).unwrap(); let burnchain = conf.get_burnchain(); let sortdb = burnchain.open_sortition_db(true).unwrap(); @@ -9797,11 +9645,10 @@ fn nakamoto_lockup_events() { let run_loop_stopper = run_loop.get_termination_switch(); let Counters { blocks_processed, - naka_submitted_commits: commits_submitted, naka_proposed_blocks: proposals_submitted, .. } = run_loop.counters(); - + let counters = run_loop.counters(); let coord_channel = run_loop.coordinator_channels(); let run_loop_thread = thread::spawn(move || run_loop.start(None, 0)); @@ -9832,13 +9679,7 @@ fn nakamoto_lockup_events() { info!("------------------------- Setup finished, run test -------------------------"); - next_block_and_mine_commit( - &mut btc_regtest_controller, - 60, - &coord_channel, - &commits_submitted, - ) - .unwrap(); + next_block_and_mine_commit(&mut btc_regtest_controller, 60, &conf, &counters).unwrap(); let http_origin = format!("http://{}", &conf.node.rpc_bind); @@ -9982,6 +9823,7 @@ fn skip_mining_long_tx() { naka_mined_blocks: mined_naka_blocks, .. 
} = run_loop.counters(); + let counters = run_loop.counters(); let coord_channel = run_loop.coordinator_channels(); @@ -10020,13 +9862,7 @@ fn skip_mining_long_tx() { // Mine a few nakamoto tenures with some interim blocks in them for i in 0..5 { let mined_before = mined_naka_blocks.load(Ordering::SeqCst); - next_block_and_mine_commit( - &mut btc_regtest_controller, - 60, - &coord_channel, - &commits_submitted, - ) - .unwrap(); + next_block_and_mine_commit(&mut btc_regtest_controller, 60, &naka_conf, &counters).unwrap(); if i == 0 { // we trigger the nakamoto miner to evaluate the long running transaction, @@ -10134,17 +9970,10 @@ fn test_shadow_recovery() { let naka_conf = signer_test.running_nodes.conf.clone(); let btc_regtest_controller = &mut signer_test.running_nodes.btc_regtest_controller; - let coord_channel = signer_test.running_nodes.coord_channel.clone(); - let commits_submitted = signer_test.running_nodes.commits_submitted.clone(); + let counters = signer_test.running_nodes.counters.clone(); // make another tenure - next_block_and_mine_commit( - btc_regtest_controller, - 60, - &coord_channel, - &commits_submitted, - ) - .unwrap(); + next_block_and_mine_commit(btc_regtest_controller, 60, &naka_conf, &counters).unwrap(); let block_height = btc_regtest_controller.get_headers_height(); let reward_cycle = btc_regtest_controller @@ -10219,13 +10048,7 @@ fn test_shadow_recovery() { } // make another tenure - next_block_and_mine_commit( - btc_regtest_controller, - 60, - &coord_channel, - &commits_submitted, - ) - .unwrap(); + next_block_and_mine_commit(btc_regtest_controller, 60, &naka_conf, &counters).unwrap(); // all shadow blocks are present and processed let mut shadow_ids = HashSet::new(); @@ -10950,6 +10773,7 @@ fn test_tenure_extend_from_flashblocks() { let coord_channel = signer_test.running_nodes.coord_channel.clone(); let commits_submitted = signer_test.running_nodes.commits_submitted.clone(); let sortitions_processed = 
signer_test.running_nodes.sortitions_processed.clone(); + let counters = signer_test.running_nodes.counters.clone(); let nakamoto_test_skip_commit_op = signer_test .running_nodes .nakamoto_test_skip_commit_op @@ -10969,13 +10793,7 @@ fn test_tenure_extend_from_flashblocks() { .unwrap(); for _ in 0..3 { - next_block_and_mine_commit( - btc_regtest_controller, - 60, - &coord_channel, - &commits_submitted, - ) - .unwrap(); + next_block_and_mine_commit(btc_regtest_controller, 60, &naka_conf, &counters).unwrap(); } let burn_view_contract = r#" @@ -11021,24 +10839,12 @@ fn test_tenure_extend_from_flashblocks() { }) .expect("Timed out waiting for interim blocks to be mined"); - next_block_and_mine_commit( - btc_regtest_controller, - 60, - &coord_channel, - &commits_submitted, - ) - .unwrap(); + next_block_and_mine_commit(btc_regtest_controller, 60, &naka_conf, &counters).unwrap(); // stall miner and relayer // make tenure but don't wait for a stacks block - next_block_and_commits_only( - btc_regtest_controller, - 60, - &coord_channel, - &commits_submitted, - ) - .unwrap(); + next_block_and_commits_only(btc_regtest_controller, 60, &naka_conf, &counters).unwrap(); // prevent the mienr from sending another block-commit nakamoto_test_skip_commit_op.set(true); diff --git a/testnet/stacks-node/src/tests/signer/mod.rs b/testnet/stacks-node/src/tests/signer/mod.rs index 1bf57444ed..6b355fe5aa 100644 --- a/testnet/stacks-node/src/tests/signer/mod.rs +++ b/testnet/stacks-node/src/tests/signer/mod.rs @@ -336,15 +336,14 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest + Send + 'static, T: SignerEventTrait + 'static> SignerTest>], - commits_submitted: &[&Arc], + node_confs: &[&NeonConfig], + node_counters: &[&Counters], timeout: Duration, ) { let blocks_len = test_observer::get_blocks().len(); @@ -370,8 +369,8 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest rl1_commits_before) + let info = get_chain_info(&conf); + Ok(info.burn_block_height >= 2 + 
info_before.burn_block_height + && rl1_commits.load(Ordering::SeqCst) > rl1_commits_before) }) .unwrap(); From cade53755e97e2b7f4198c778e3e63dfb7c1f120 Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Fri, 31 Jan 2025 10:37:40 -0500 Subject: [PATCH 65/82] fix: Disable flaky test `flash_blocks_on_epoch_3` --- .github/workflows/bitcoin-tests.yml | 3 ++- testnet/stacks-node/src/tests/nakamoto_integrations.rs | 8 +++++++- 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index 13d94438f9..57b37f44e7 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -83,7 +83,8 @@ jobs: - tests::neon_integrations::start_stop_bitcoind - tests::should_succeed_handling_malformed_and_valid_txs - tests::nakamoto_integrations::simple_neon_integration - - tests::nakamoto_integrations::flash_blocks_on_epoch_3 + # Disable this flaky test. We don't need continue testing Epoch 2 -> 3 transition + - tests::nakamoto_integrations::flash_blocks_on_epoch_3_FLAKY - tests::nakamoto_integrations::mine_multiple_per_tenure_integration - tests::nakamoto_integrations::block_proposal_api_endpoint - tests::nakamoto_integrations::miner_writes_proposed_block_to_stackerdb diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 71775742d0..c9e8f6e161 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -1968,6 +1968,7 @@ fn restarting_miner() { #[test] #[ignore] +#[allow(non_snake_case)] /// This test spins up a nakamoto-neon node. /// It starts in Epoch 2.0, mines with `neon_node` to Epoch 3.0, /// having flash blocks when epoch updates and expects everything to work normally, @@ -1977,7 +1978,12 @@ fn restarting_miner() { /// * 30 blocks are mined after 3.0 starts. 
This is enough to mine across 2 reward cycles /// * A transaction submitted to the mempool in 3.0 will be mined in 3.0 /// * The final chain tip is a nakamoto block -fn flash_blocks_on_epoch_3() { +/// +/// NOTE: This test has been disabled because it's flaky, and we don't need to +/// test the Epoch 3 transition since it's already happened +/// +/// See issue [#5765](https://github.com/stacks-network/stacks-core/issues/5765) for details +fn flash_blocks_on_epoch_3_FLAKY() { if env::var("BITCOIND_TEST") != Ok("1".into()) { return; } From eb883ee9eef299631e8adda64d4c5a05de804c5a Mon Sep 17 00:00:00 2001 From: Roberto De Ioris Date: Fri, 31 Jan 2025 15:42:24 +0000 Subject: [PATCH 66/82] improved error messages on timeout --- testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs index ec827033ba..4adba66be1 100644 --- a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs @@ -380,7 +380,7 @@ impl SignerCoordinator { "rejections_threshold" => self.total_weight.saturating_sub(self.weight_threshold) ); return Err(NakamotoNodeError::SigningCoordinatorFailure( - "Gave up while trying reaching the threshold".into(), + "Timed out while waiting for signatures".into(), )); } @@ -437,7 +437,7 @@ impl SignerCoordinator { "rejections_threshold" => self.total_weight.saturating_sub(self.weight_threshold) ); return Err(NakamotoNodeError::SigningCoordinatorFailure( - "Gave up while trying reaching the threshold".into(), + "Timed out while waiting for signatures".into(), )); } else { continue; From bbc6ab82cc6aa76954d38cb6b0907eaa91204201 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Fri, 31 Jan 2025 10:52:45 -0600 Subject: [PATCH 67/82] fix revealed flake in restarting_miner and test_shadow_recovery --- 
.../stacks-node/src/tests/nakamoto_integrations.rs | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 0c60902fa0..3a75e6b630 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -1745,7 +1745,7 @@ fn restarting_miner() { naka_proposed_blocks: proposals_submitted_2, .. } = run_loop_2.counters(); - let rl2_counters = run_loop.counters(); + let rl2_counters = run_loop_2.counters(); let run_loop_thread = thread::spawn(move || run_loop.start(None, 0)); wait_for_runloop(&blocks_processed); @@ -1832,13 +1832,16 @@ fn restarting_miner() { let tip = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) .unwrap() .unwrap(); - Ok(tip.stacks_block_height > last_tip.stacks_block_height) + let stacks_tip_committed_to = rl2_counters + .naka_submitted_commit_last_stacks_tip + .load(Ordering::SeqCst); + Ok(tip.stacks_block_height > last_tip.stacks_block_height + && stacks_tip_committed_to > last_tip.stacks_block_height) }) .unwrap_or_else(|e| { let tip = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) .unwrap() .unwrap(); - error!( "Failed to get a new block after restart"; "last_tip_height" => last_tip.stacks_block_height, @@ -10043,8 +10046,7 @@ fn test_shadow_recovery() { // revive ATC-C by waiting for commits for _i in 0..4 { - btc_regtest_controller.bootstrap_chain(1); - sleep_ms(30_000); + next_block_and_commits_only(btc_regtest_controller, 60, &naka_conf, &counters).unwrap(); } // make another tenure From 8803ff15582c63a360456ee5db0f61c5c71d01fc Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Wed, 29 Jan 2025 16:30:21 -0500 Subject: [PATCH 68/82] feat: do not issue a time-based tenure extend earlier than needed With this change, a miner will not issue a time-based tenure extend if it has used less than X% of 
the tenure budget, where X can be specified in the `tenure_extend_cost_threshold` config option. --- CHANGELOG.md | 5 ++ stackslib/src/chainstate/nakamoto/miner.rs | 36 +++++++---- stackslib/src/cli.rs | 18 +++++- stackslib/src/config/mod.rs | 8 ++- .../stacks-node/src/nakamoto_node/miner.rs | 63 ++++++++++++++----- 5 files changed, 99 insertions(+), 31 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 226f7b5159..7ec05bb51e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,9 +7,14 @@ and this project adheres to the versioning scheme outlined in the [README.md](RE ## [Unreleased] +### Added + +- Miner config option `tenure_extend_cost_threshold` to specify the percentage of the tenure budget that must be spent before a time-based tenure extend is attempted + ### Changed - Miner will include other transactions in blocks with tenure extend transactions (#5760) +- Miner will not issue a tenure extend until at least half of the block budget has been spent (#5757) ## [3.1.0.0.4] diff --git a/stackslib/src/chainstate/nakamoto/miner.rs b/stackslib/src/chainstate/nakamoto/miner.rs index a36e52512d..0156adbc96 100644 --- a/stackslib/src/chainstate/nakamoto/miner.rs +++ b/stackslib/src/chainstate/nakamoto/miner.rs @@ -150,6 +150,14 @@ pub struct MinerTenureInfo<'a> { pub tenure_block_commit_opt: Option, } +pub struct BlockMetadata { + pub block: NakamotoBlock, + pub tenure_consumed: ExecutionCost, + pub tenure_budget: ExecutionCost, + pub tenure_size: u64, + pub tx_events: Vec, +} + impl NakamotoBlockBuilder { /// Make a block builder from genesis (testing only) pub fn new_first_block( @@ -526,7 +534,7 @@ impl NakamotoBlockBuilder { settings: BlockBuilderSettings, event_observer: Option<&dyn MemPoolEventDispatcher>, signer_bitvec_len: u16, - ) -> Result<(NakamotoBlock, ExecutionCost, u64, Vec), Error> { + ) -> Result { let (tip_consensus_hash, tip_block_hash, tip_height) = ( parent_stacks_header.consensus_hash.clone(), 
parent_stacks_header.anchored_header.block_hash(), @@ -556,7 +564,7 @@ impl NakamotoBlockBuilder { builder.load_tenure_info(&mut chainstate, burn_dbconn, tenure_info.cause())?; let mut tenure_tx = builder.tenure_begin(burn_dbconn, &mut miner_tenure_info)?; - let block_limit = tenure_tx + let tenure_budget = tenure_tx .block_limit() .expect("Failed to obtain block limit from miner's block connection"); @@ -570,7 +578,7 @@ impl NakamotoBlockBuilder { (1..=100).contains(&percentage), "BUG: tenure_cost_limit_per_block_percentage: {percentage}%. Must be between between 1 and 100" ); - let mut remaining_limit = block_limit.clone(); + let mut remaining_limit = tenure_budget.clone(); let cost_so_far = tenure_tx.cost_so_far(); if remaining_limit.sub(&cost_so_far).is_ok() && remaining_limit.divide(100).is_ok() { remaining_limit.multiply(percentage.into()).expect( @@ -581,7 +589,7 @@ impl NakamotoBlockBuilder { "Setting soft limit for clarity cost to {percentage}% of remaining block limit"; "remaining_limit" => %remaining_limit, "cost_so_far" => %cost_so_far, - "block_limit" => %block_limit, + "block_limit" => %tenure_budget, ); soft_limit = Some(remaining_limit); }; @@ -630,13 +638,13 @@ impl NakamotoBlockBuilder { // save the block so we can build microblocks off of it let block = builder.mine_nakamoto_block(&mut tenure_tx); - let size = builder.bytes_so_far; - let consumed = builder.tenure_finish(tenure_tx)?; + let tenure_size = builder.bytes_so_far; + let tenure_consumed = builder.tenure_finish(tenure_tx)?; let ts_end = get_epoch_time_ms(); set_last_mined_block_transaction_count(block.txs.len() as u64); - set_last_mined_execution_cost_observed(&consumed, &block_limit); + set_last_mined_execution_cost_observed(&tenure_consumed, &tenure_budget); info!( "Miner: mined Nakamoto block"; @@ -645,14 +653,20 @@ impl NakamotoBlockBuilder { "height" => block.header.chain_length, "tx_count" => block.txs.len(), "parent_block_id" => %block.header.parent_block_id, - "block_size" => 
size, - "execution_consumed" => %consumed, - "percent_full" => block_limit.proportion_largest_dimension(&consumed), + "block_size" => tenure_size, + "execution_consumed" => %tenure_consumed, + "percent_full" => tenure_budget.proportion_largest_dimension(&tenure_consumed), "assembly_time_ms" => ts_end.saturating_sub(ts_start), "consensus_hash" => %block.header.consensus_hash ); - Ok((block, consumed, size, tx_events)) + Ok(BlockMetadata { + block, + tenure_consumed, + tenure_budget, + tenure_size, + tx_events, + }) } pub fn get_bytes_so_far(&self) -> u64 { diff --git a/stackslib/src/cli.rs b/stackslib/src/cli.rs index 286e7f1854..f43812f2ba 100644 --- a/stackslib/src/cli.rs +++ b/stackslib/src/cli.rs @@ -40,7 +40,7 @@ use crate::chainstate::burn::db::sortdb::{ }; use crate::chainstate::burn::{BlockSnapshot, ConsensusHash}; use crate::chainstate::coordinator::OnChainRewardSetProvider; -use crate::chainstate::nakamoto::miner::{NakamotoBlockBuilder, NakamotoTenureInfo}; +use crate::chainstate::nakamoto::miner::{BlockMetadata, NakamotoBlockBuilder, NakamotoTenureInfo}; use crate::chainstate::nakamoto::{NakamotoBlock, NakamotoChainState}; use crate::chainstate::stacks::db::blocks::StagingBlock; use crate::chainstate::stacks::db::{StacksBlockHeaderTypes, StacksChainState, StacksHeaderInfo}; @@ -504,7 +504,21 @@ pub fn command_try_mine(argv: &[String], conf: Option<&Config>) { None, 0, ) - .map(|(block, cost, size, _)| (block.header.block_hash(), block.txs, cost, size)) + .map( + |BlockMetadata { + block, + tenure_consumed, + tenure_size, + .. 
+ }| { + ( + block.header.block_hash(), + block.txs, + tenure_consumed, + tenure_size, + ) + }, + ) } }; diff --git a/stackslib/src/config/mod.rs b/stackslib/src/config/mod.rs index b9b9bf5204..3684b16a32 100644 --- a/stackslib/src/config/mod.rs +++ b/stackslib/src/config/mod.rs @@ -97,7 +97,8 @@ const DEFAULT_TENURE_COST_LIMIT_PER_BLOCK_PERCENTAGE: u8 = 25; const DEFAULT_TENURE_EXTEND_POLL_SECS: u64 = 1; // This should be greater than the signers' timeout. This is used for issuing fallback tenure extends -const DEFAULT_TENURE_TIMEOUT_SECS: u64 = 420; +const DEFAULT_TENURE_TIMEOUT_SECS: u64 = 180; +const DEFAULT_TENURE_EXTEND_COST_THRESHOLD: u64 = 50; static HELIUM_DEFAULT_CONNECTION_OPTIONS: LazyLock = LazyLock::new(|| ConnectionOptions { @@ -2155,6 +2156,8 @@ pub struct MinerConfig { pub tenure_extend_poll_secs: Duration, /// Duration to wait before attempting to issue a tenure extend pub tenure_timeout: Duration, + /// Percentage of block budget that must be used before attempting a time-based tenure extend + pub tenure_extend_cost_threshold: u64, } impl Default for MinerConfig { @@ -2193,6 +2196,7 @@ impl Default for MinerConfig { ), tenure_extend_poll_secs: Duration::from_secs(DEFAULT_TENURE_EXTEND_POLL_SECS), tenure_timeout: Duration::from_secs(DEFAULT_TENURE_TIMEOUT_SECS), + tenure_extend_cost_threshold: DEFAULT_TENURE_EXTEND_COST_THRESHOLD, } } } @@ -2589,6 +2593,7 @@ pub struct MinerConfigFile { pub tenure_cost_limit_per_block_percentage: Option, pub tenure_extend_poll_secs: Option, pub tenure_timeout_secs: Option, + pub tenure_extend_cost_threshold: Option, } impl MinerConfigFile { @@ -2731,6 +2736,7 @@ impl MinerConfigFile { tenure_cost_limit_per_block_percentage, tenure_extend_poll_secs: self.tenure_extend_poll_secs.map(Duration::from_secs).unwrap_or(miner_default_config.tenure_extend_poll_secs), tenure_timeout: self.tenure_timeout_secs.map(Duration::from_secs).unwrap_or(miner_default_config.tenure_timeout), + tenure_extend_cost_threshold: 
self.tenure_extend_cost_threshold.unwrap_or(miner_default_config.tenure_extend_cost_threshold), }) } } diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index fb233737bb..16b33ead7a 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -21,6 +21,7 @@ use std::thread; use std::time::{Duration, Instant}; use clarity::boot_util::boot_code_id; +use clarity::vm::costs::ExecutionCost; use clarity::vm::types::PrincipalData; use libsigner::v0::messages::{MinerSlotID, SignerMessage}; use libsigner::StackerDBSession; @@ -177,6 +178,10 @@ pub struct BlockMinerThread { last_block_mined: Option, /// Number of blocks mined since a tenure change/extend was attempted mined_blocks: u64, + /// Cost consumed by the current tenure + tenure_cost: ExecutionCost, + /// Cost budget for the current tenure + tenure_budget: ExecutionCost, /// Copy of the node's registered VRF key registered_key: RegisteredKey, /// Burnchain block snapshot which elected this miner @@ -237,6 +242,8 @@ impl BlockMinerThread { burn_tip_at_start: burn_tip_at_start.clone(), tenure_change_time: Instant::now(), abort_flag: Arc::new(AtomicBool::new(false)), + tenure_cost: ExecutionCost::ZERO, + tenure_budget: ExecutionCost::ZERO, } } @@ -1183,7 +1190,7 @@ impl BlockMinerThread { } // build the block itself - let (mut block, consumed, size, tx_events) = NakamotoBlockBuilder::build_nakamoto_block( + let mut block_metadata = NakamotoBlockBuilder::build_nakamoto_block( &chain_state, &burn_db .index_handle_at_ch(&self.burn_block.consensus_hash) @@ -1210,39 +1217,48 @@ impl BlockMinerThread { e })?; - if block.txs.is_empty() { + if block_metadata.block.txs.is_empty() { return Err(ChainstateError::NoTransactionsToMine.into()); } let mining_key = self.keychain.get_nakamoto_sk(); let miner_signature = mining_key - .sign(block.header.miner_signature_hash().as_bytes()) + .sign( + block_metadata + .block + 
.header + .miner_signature_hash() + .as_bytes(), + ) .map_err(NakamotoNodeError::MinerSignatureError)?; - block.header.miner_signature = miner_signature; + block_metadata.block.header.miner_signature = miner_signature; info!( "Miner: Assembled block #{} for signer set proposal: {}, with {} txs", - block.header.chain_length, - block.header.block_hash(), - block.txs.len(); - "signer_sighash" => %block.header.signer_signature_hash(), - "consensus_hash" => %block.header.consensus_hash, - "parent_block_id" => %block.header.parent_block_id, - "timestamp" => block.header.timestamp, + block_metadata.block.header.chain_length, + block_metadata.block.header.block_hash(), + block_metadata.block.txs.len(); + "signer_sighash" => %block_metadata.block.header.signer_signature_hash(), + "consensus_hash" => %block_metadata.block.header.consensus_hash, + "parent_block_id" => %block_metadata.block.header.parent_block_id, + "timestamp" => block_metadata.block.header.timestamp, ); self.event_dispatcher.process_mined_nakamoto_block_event( self.burn_block.block_height, - &block, - size, - &consumed, - tx_events, + &block_metadata.block, + block_metadata.tenure_size, + &block_metadata.tenure_consumed, + block_metadata.tx_events, ); + self.tenure_cost = block_metadata.tenure_consumed; + self.tenure_budget = block_metadata.tenure_budget; + // last chance -- confirm that the stacks tip is unchanged (since it could have taken long // enough to build this block that another block could have arrived), and confirm that all // Stacks blocks with heights higher than the canonical tip are processed. self.check_burn_tip_changed(&burn_db)?; - Ok(block) + Ok(block_metadata.block) } #[cfg_attr(test, mutants::skip)] @@ -1273,8 +1289,20 @@ impl BlockMinerThread { } } }; + // Check if we can and should include a time-based tenure extend. 
if self.last_block_mined.is_some() { - // Check if we can extend the current tenure + // Do not extend if we have spent < 50% of the budget, since it is + // not necessary. + let usage = self + .tenure_budget + .proportion_largest_dimension(&self.tenure_cost); + if usage < self.config.miner.tenure_extend_cost_threshold { + return Ok(NakamotoTenureInfo { + coinbase_tx: None, + tenure_change_tx: None, + }); + } + let tenure_extend_timestamp = coordinator.get_tenure_extend_timestamp(); if get_epoch_time_secs() <= tenure_extend_timestamp && self.tenure_change_time.elapsed() <= self.config.miner.tenure_timeout @@ -1284,6 +1312,7 @@ impl BlockMinerThread { tenure_change_tx: None, }); } + info!("Miner: Time-based tenure extend"; "current_timestamp" => get_epoch_time_secs(), "tenure_extend_timestamp" => tenure_extend_timestamp, From 21af65581308a89ab5cdee3f69a89d7b6fdcd630 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Fri, 31 Jan 2025 12:31:34 -0500 Subject: [PATCH 69/82] test: add `tenure_extend_cost_threshold` integration test Also update the configs in other tests so that they do not need to wait for the default tenure extend threshold. 
--- testnet/stacks-node/src/tests/signer/v0.rs | 119 ++++++++++++++++++++- 1 file changed, 116 insertions(+), 3 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 2bbcccaced..daf3d44186 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -2630,7 +2630,9 @@ fn tenure_extend_after_idle_signers() { |config| { config.tenure_idle_timeout = idle_timeout; }, - |_| {}, + |config| { + config.miner.tenure_extend_cost_threshold = 0; + }, None, None, ); @@ -2680,7 +2682,9 @@ fn tenure_extend_with_other_transactions() { |config| { config.tenure_idle_timeout = idle_timeout; }, - |_| {}, + |config| { + config.miner.tenure_extend_cost_threshold = 0; + }, None, None, ); @@ -2787,6 +2791,7 @@ fn tenure_extend_after_idle_miner() { }, |config| { config.miner.tenure_timeout = miner_idle_timeout; + config.miner.tenure_extend_cost_threshold = 0; }, None, None, @@ -2863,6 +2868,7 @@ fn tenure_extend_succeeds_after_rejected_attempt() { }, |config| { config.miner.tenure_timeout = miner_idle_timeout; + config.miner.tenure_extend_cost_threshold = 0; }, None, None, @@ -2951,7 +2957,9 @@ fn stx_transfers_dont_effect_idle_timeout() { |config| { config.tenure_idle_timeout = idle_timeout; }, - |_| {}, + |config| { + config.miner.tenure_extend_cost_threshold = 0; + }, None, None, ); @@ -3085,6 +3093,7 @@ fn idle_tenure_extend_active_mining() { |config| { // accept all proposals in the node config.connection_options.block_proposal_max_age_secs = u64::MAX; + config.miner.tenure_extend_cost_threshold = 0; }, None, None, @@ -12592,3 +12601,107 @@ fn allow_reorg_within_first_proposal_burn_block_timing_secs() { run_loop_2_thread.join().unwrap(); signer_test.shutdown(); } + +#[test] +#[ignore] +/// This test verifies that a miner will produce a TenureExtend transaction +/// only after it has reached the cost threshold. 
+fn tenure_extend_cost_threshold() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let deployer_sk = Secp256k1PrivateKey::random(); + let deployer_addr = tests::to_addr(&deployer_sk); + let num_txs = 10; + let tx_fee = 10000; + let deploy_fee = 190200; + + info!("------------------------- Test Setup -------------------------"); + let num_signers = 5; + let idle_timeout = Duration::from_secs(10); + let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( + num_signers, + vec![(deployer_addr, deploy_fee + tx_fee * num_txs)], + |config| { + config.tenure_idle_timeout = idle_timeout; + }, + |config| { + config.miner.tenure_extend_cost_threshold = 5; + }, + None, + None, + ); + let naka_conf = signer_test.running_nodes.conf.clone(); + let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); + + signer_test.boot_to_epoch_3(); + + info!("---- Nakamoto booted, starting test ----"); + signer_test.mine_nakamoto_block(Duration::from_secs(30), true); + + info!("---- Waiting for a tenure extend ----"); + + // Now, wait for a block with a tenure extend + wait_for(idle_timeout.as_secs() + 10, || { + Ok(last_block_contains_tenure_change_tx( + TenureChangeCause::Extended, + )) + }) + .expect_err("Received a tenure extend before cost threshold was reached"); + + // Now deploy a contract and call it in order to cross the threshold. 
+ let contract_src = format!( + r#" +(define-data-var my-var uint u0) +(define-public (f) (begin {} (ok 1))) (begin (f)) + "#, + ["(var-get my-var)"; 250].join(" ") + ); + + // First, lets deploy the contract + let mut nonce = 0; + let contract_tx = make_contract_publish( + &deployer_sk, + nonce, + deploy_fee, + naka_conf.burnchain.chain_id, + "small-contract", + &contract_src, + ); + submit_tx(&http_origin, &contract_tx); + nonce += 1; + + // Wait for the contract to be included in a block + wait_for(60, || { + let account = get_account(&http_origin, &deployer_addr); + Ok(account.nonce == nonce) + }) + .expect("Contract not included in block"); + + // Now, lets call the contract a bunch of times to increase the tenure cost + for _ in 0..num_txs { + let call_tx = make_contract_call( + &deployer_sk, + nonce, + tx_fee, + naka_conf.burnchain.chain_id, + &deployer_addr, + "small-contract", + "f", + &[], + ); + submit_tx(&http_origin, &call_tx); + nonce += 1; + } + + // Now, wait for a block with a tenure extend + wait_for(idle_timeout.as_secs() + 10, || { + Ok(last_block_contains_tenure_change_tx( + TenureChangeCause::Extended, + )) + }) + .expect("Timed out waiting for a block with a tenure extend"); + + signer_test.shutdown(); +} From fe429f472fe3e99b6cf1909fc5574262109fd36c Mon Sep 17 00:00:00 2001 From: Roberto De Ioris Date: Fri, 31 Jan 2025 17:56:55 +0000 Subject: [PATCH 70/82] report rejection step in miner log, ensure signe_test shutdown in integration test --- testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs | 5 +++-- testnet/stacks-node/src/tests/signer/v0.rs | 3 +++ 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs index 4adba66be1..7a778580d7 100644 --- a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs @@ -390,7 +390,7 @@ impl 
SignerCoordinator { if rejections != block_status.total_reject_weight as u64 { rejections = block_status.total_reject_weight as u64; - let rejections_timeout_tuple = self + let (rejections_step, new_rejections_timeout) = self .block_rejection_timeout_steps .range((Included(0), Included(rejections))) .last() @@ -399,10 +399,11 @@ impl SignerCoordinator { "Invalid rejection timeout step function definition".into(), ) })?; - rejections_timeout = rejections_timeout_tuple.1; + rejections_timeout = new_rejections_timeout; info!("Number of received rejections updated, resetting timeout"; "rejections" => rejections, "rejections_timeout" => rejections_timeout.as_secs(), + "rejections_step" => rejections_step, "rejections_threshold" => self.total_weight.saturating_sub(self.weight_threshold)); Counters::set( diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 9fa4fac3c0..e055ad82f2 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -8111,6 +8111,9 @@ fn block_validation_check_rejection_timeout_heuristic() { // reset reject/ignore TEST_REJECT_ALL_BLOCK_PROPOSAL.set(vec![]); TEST_IGNORE_ALL_BLOCK_PROPOSALS.set(vec![]); + + info!("------------------------- Shutdown -------------------------"); + signer_test.shutdown(); } /// Test scenario: From 60634c20c0392e30f812d5e942fe4a5721435e5d Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Fri, 31 Jan 2025 12:03:43 -0600 Subject: [PATCH 71/82] fix flake in check_block_times --- .../src/tests/nakamoto_integrations.rs | 49 ++++++++----------- 1 file changed, 20 insertions(+), 29 deletions(-) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 3a75e6b630..f426995595 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -780,19 +780,18 @@ pub fn 
next_block_and_wait_for_commits( .map(|info| info.stacks_tip_height) .max() .unwrap(); - let last_commit_burn_hts: Vec<_> = node_counters + let last_commit_burn_hts = node_counters .iter() - .map(|c| &c.naka_submitted_commit_last_burn_height) - .collect(); - let last_commit_stacks_hts: Vec<_> = node_counters + .map(|c| &c.naka_submitted_commit_last_burn_height); + let last_commit_stacks_hts = node_counters .iter() - .map(|c| &c.naka_submitted_commit_last_stacks_tip) - .collect(); + .map(|c| &c.naka_submitted_commit_last_stacks_tip); next_block_and(btc_controller, timeout_secs, || { - let burn_height_committed_to = last_commit_burn_hts.iter().all(|last_commit_burn_height| { - last_commit_burn_height.load(Ordering::SeqCst) > burn_ht_before - }); + let burn_height_committed_to = + last_commit_burn_hts.clone().all(|last_commit_burn_height| { + last_commit_burn_height.load(Ordering::SeqCst) > burn_ht_before + }); if !wait_for_stacks_block { Ok(burn_height_committed_to) } else { @@ -801,7 +800,7 @@ pub fn next_block_and_wait_for_commits( } let stacks_tip_committed_to = last_commit_stacks_hts - .iter() + .clone() .all(|last_commit_stacks_height| { last_commit_stacks_height.load(Ordering::SeqCst) > stacks_ht_before }); @@ -7564,6 +7563,7 @@ fn check_block_times() { naka_proposed_blocks: proposals_submitted, .. 
} = run_loop.counters(); + let counters = run_loop.counters(); let coord_channel = run_loop.coordinator_channels(); @@ -7606,19 +7606,13 @@ fn check_block_times() { info!("Nakamoto miner started..."); blind_signer(&naka_conf, &signers, proposals_submitted); + wait_for_first_naka_block_commit(60, &counters.naka_submitted_commits); - let epochs = naka_conf.burnchain.epochs.clone().unwrap(); - let epoch_3 = &epochs[StacksEpochId::Epoch30]; - let epoch_3_start = epoch_3.start_height; - let mut last_stacks_block_height = 0; - let mut last_tenure_height = 0; - next_block_and(&mut btc_regtest_controller, 60, || { - let info = get_chain_info_result(&naka_conf).unwrap(); - last_stacks_block_height = info.stacks_tip_height as u128; - last_tenure_height = last_stacks_block_height + 1; - Ok(info.burn_block_height == epoch_3_start) - }) - .unwrap(); + let info = get_chain_info_result(&naka_conf).unwrap(); + let mut last_stacks_block_height = info.stacks_tip_height as u128; + let mut last_tenure_height = last_stacks_block_height + 1; + + next_block_and_mine_commit(&mut btc_regtest_controller, 60, &naka_conf, &counters).unwrap(); let time0_value = call_read_only( &naka_conf, @@ -7676,16 +7670,13 @@ fn check_block_times() { Ok(stacks_block_height > last_stacks_block_height && cur_sender_nonce == sender_nonce) }) .expect("Timed out waiting for contracts to publish"); - last_stacks_block_height = stacks_block_height; // Repeat these tests for 5 tenures for _ in 0..5 { - next_block_and(&mut btc_regtest_controller, 60, || { - let info = get_chain_info_result(&naka_conf).unwrap(); - stacks_block_height = info.stacks_tip_height as u128; - Ok(stacks_block_height > last_stacks_block_height) - }) - .unwrap(); + next_block_and_mine_commit(&mut btc_regtest_controller, 60, &naka_conf, &counters).unwrap(); + let info = get_chain_info_result(&naka_conf).unwrap(); + stacks_block_height = info.stacks_tip_height as u128; + last_stacks_block_height = stacks_block_height; last_tenure_height += 1; 
info!("New tenure {last_tenure_height}, Stacks height: {last_stacks_block_height}"); From 6a238a76228c16c3a15e91a6d1352175299b5862 Mon Sep 17 00:00:00 2001 From: Roberto De Ioris Date: Fri, 31 Jan 2025 18:09:48 +0000 Subject: [PATCH 72/82] added set_miner_current_rejections_timeout and set_miner_current_rejections to Counters struct --- .../src/nakamoto_node/signer_coordinator.rs | 7 ++----- testnet/stacks-node/src/run_loop/neon.rs | 12 ++++++++++-- 2 files changed, 12 insertions(+), 7 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs index 7a778580d7..97b22ef68e 100644 --- a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs @@ -406,11 +406,8 @@ impl SignerCoordinator { "rejections_step" => rejections_step, "rejections_threshold" => self.total_weight.saturating_sub(self.weight_threshold)); - Counters::set( - &counters.naka_miner_current_rejections_timeout_secs, - rejections_timeout.as_secs(), - ); - Counters::set(&counters.naka_miner_current_rejections, rejections); + counters.set_miner_current_rejections_timeout(rejections_timeout.as_secs()); + counters.set_miner_current_rejections(rejections); } if block_status diff --git a/testnet/stacks-node/src/run_loop/neon.rs b/testnet/stacks-node/src/run_loop/neon.rs index 1d372f1051..3ef3e45ccb 100644 --- a/testnet/stacks-node/src/run_loop/neon.rs +++ b/testnet/stacks-node/src/run_loop/neon.rs @@ -139,12 +139,12 @@ impl Counters { fn inc(_ctr: &RunLoopCounter) {} #[cfg(test)] - pub fn set(ctr: &RunLoopCounter, value: u64) { + fn set(ctr: &RunLoopCounter, value: u64) { ctr.0.store(value, Ordering::SeqCst); } #[cfg(not(test))] - pub fn set(_ctr: &RunLoopCounter, _value: u64) {} + fn set(_ctr: &RunLoopCounter, _value: u64) {} pub fn bump_blocks_processed(&self) { Counters::inc(&self.blocks_processed); @@ -217,6 +217,14 @@ impl Counters { pub fn 
set_microblocks_processed(&self, value: u64) { Counters::set(&self.microblocks_processed, value) } + + pub fn set_miner_current_rejections_timeout(&self, value: u64) { + Counters::set(&self.naka_miner_current_rejections_timeout_secs, value) + } + + pub fn set_miner_current_rejections(&self, value: u64) { + Counters::set(&self.naka_miner_current_rejections, value) + } } /// Coordinating a node running in neon mode. From 651018450831970647cb4aa8dfe1fba0162f6740 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Fri, 31 Jan 2025 13:11:09 -0500 Subject: [PATCH 73/82] chore: formatting --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 92211cbe8f..84516a1eac 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -14,7 +14,7 @@ and this project adheres to the versioning scheme outlined in the [README.md](RE `StackerDB` messages, it logs `INFO` messages. Other interactions with the `stacks-node` behave normally (e.g., submitting validation requests, submitting finished blocks). A dry run signer will error out if the supplied key is actually a registered signer. 
-- Miner config option `tenure_extend_cost_threshold` to specify the percentage of the tenure budget that must be spent before a time-based tenure extend is attempted +- Add miner configuration option `tenure_extend_cost_threshold` to specify the percentage of the tenure budget that must be spent before a time-based tenure extend is attempted ### Changed From 870187b7114c685a1bc75cf8a0a2be31e285b964 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Fri, 31 Jan 2025 13:13:02 -0500 Subject: [PATCH 74/82] chore: move `dry_run` info to signer changelog --- CHANGELOG.md | 5 ----- stacks-signer/CHANGELOG.md | 10 ++++++++++ 2 files changed, 10 insertions(+), 5 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 84516a1eac..1644446dd7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,11 +9,6 @@ and this project adheres to the versioning scheme outlined in the [README.md](RE ### Added -- Add `dry_run` configuration option to `stacks-signer` config toml. Dry run mode will - run the signer binary as if it were a registered signer. Instead of broadcasting - `StackerDB` messages, it logs `INFO` messages. Other interactions with the `stacks-node` - behave normally (e.g., submitting validation requests, submitting finished blocks). A - dry run signer will error out if the supplied key is actually a registered signer. - Add miner configuration option `tenure_extend_cost_threshold` to specify the percentage of the tenure budget that must be spent before a time-based tenure extend is attempted ### Changed diff --git a/stacks-signer/CHANGELOG.md b/stacks-signer/CHANGELOG.md index 2697d93508..df30e0d0db 100644 --- a/stacks-signer/CHANGELOG.md +++ b/stacks-signer/CHANGELOG.md @@ -5,6 +5,16 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to the versioning scheme outlined in the [README.md](README.md). 
+## [Unreleased] + +### Added + +- Add `dry_run` configuration option to `stacks-signer` config toml. Dry run mode will + run the signer binary as if it were a registered signer. Instead of broadcasting + `StackerDB` messages, it logs `INFO` messages. Other interactions with the `stacks-node` + behave normally (e.g., submitting validation requests, submitting finished blocks). A + dry run signer will error out if the supplied key is actually a registered signer. + ## [3.1.0.0.4.0] ## Added From fb2a70717b9544b542490cabd83927e96f201326 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Fri, 31 Jan 2025 12:30:08 -0600 Subject: [PATCH 75/82] test: fix reward-cycle flake in multiple_miners_empty_sortition --- testnet/stacks-node/src/tests/signer/v0.rs | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index fa4a6ec672..cf3b45656c 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -85,8 +85,9 @@ use crate::tests::nakamoto_integrations::{ POX_4_DEFAULT_STACKER_BALANCE, POX_4_DEFAULT_STACKER_STX_AMT, }; use crate::tests::neon_integrations::{ - get_account, get_chain_info, get_chain_info_opt, get_sortition_info, get_sortition_info_ch, - next_block_and_wait, run_until_burnchain_height, submit_tx, submit_tx_fallible, test_observer, + get_account, get_chain_info, get_chain_info_opt, get_pox_info, get_sortition_info, + get_sortition_info_ch, next_block_and_wait, run_until_burnchain_height, submit_tx, + submit_tx_fallible, test_observer, }; use crate::tests::{ self, gen_random_port, make_contract_call, make_contract_publish, make_stacks_transfer, @@ -11729,6 +11730,15 @@ fn multiple_miners_empty_sortition() { let last_active_sortition = get_sortition_info(&conf); assert!(last_active_sortition.was_sortition); + // check if we're about to cross a reward cycle boundary -- if so, we can't + // perform this 
test, because we can't tenure extend across the boundary + let pox_info = get_pox_info(&conf.node.data_url).unwrap(); + let blocks_until_next_cycle = pox_info.next_cycle.blocks_until_reward_phase; + if blocks_until_next_cycle == 1 { + info!("We're about to cross a reward cycle boundary, cannot perform a tenure extend here!"); + continue; + } + // lets mine a btc flash block let rl2_commits_before = rl2_commits.load(Ordering::SeqCst); let rl1_commits_before = rl1_commits.load(Ordering::SeqCst); From eb8e944862f0b876203cb5983725073edd41a1e5 Mon Sep 17 00:00:00 2001 From: Aaron Blankstein Date: Fri, 31 Jan 2025 13:42:27 -0600 Subject: [PATCH 76/82] test: fix flake in test_tenure_extend_from_flashblocks, bump default test retry sleep from 100ms to 500ms (less log spam in tests) --- .../src/tests/nakamoto_integrations.rs | 83 ++++--------------- 1 file changed, 16 insertions(+), 67 deletions(-) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index f426995595..1ae369a17e 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -693,7 +693,7 @@ where error!("Timed out waiting for check to process"); return Err("Timed out".into()); } - thread::sleep(Duration::from_millis(100)); + thread::sleep(Duration::from_millis(500)); } Ok(()) } @@ -10764,8 +10764,6 @@ fn test_tenure_extend_from_flashblocks() { let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); let btc_regtest_controller = &mut signer_test.running_nodes.btc_regtest_controller; let coord_channel = signer_test.running_nodes.coord_channel.clone(); - let commits_submitted = signer_test.running_nodes.commits_submitted.clone(); - let sortitions_processed = signer_test.running_nodes.sortitions_processed.clone(); let counters = signer_test.running_nodes.counters.clone(); let nakamoto_test_skip_commit_op = signer_test .running_nodes @@ -10818,17 +10816,9 @@ fn 
test_tenure_extend_from_flashblocks() { ); submit_tx(&http_origin, &contract_tx); - let blocks_processed_before = coord_channel - .lock() - .expect("Mutex poisoned") - .get_stacks_blocks_processed(); - wait_for(120, || { - let blocks_processed = coord_channel - .lock() - .expect("Mutex poisoned") - .get_stacks_blocks_processed(); - Ok(blocks_processed > blocks_processed_before) + let sender_nonce = get_account(&naka_conf.node.data_url, &deployer_addr).nonce; + Ok(sender_nonce > 0) }) .expect("Timed out waiting for interim blocks to be mined"); @@ -10836,39 +10826,23 @@ fn test_tenure_extend_from_flashblocks() { // stall miner and relayer - // make tenure but don't wait for a stacks block - next_block_and_commits_only(btc_regtest_controller, 60, &naka_conf, &counters).unwrap(); + // make tenure + next_block_and_mine_commit(btc_regtest_controller, 60, &naka_conf, &counters).unwrap(); - // prevent the mienr from sending another block-commit + // prevent the miner from sending another block-commit nakamoto_test_skip_commit_op.set(true); - // make sure we get a block-found tenure change - let blocks_processed_before = coord_channel - .lock() - .expect("Mutex poisoned") - .get_stacks_blocks_processed(); - - // make sure the relayer processes both sortitions - let sortitions_processed_before = sortitions_processed.load(Ordering::SeqCst); + let info_before = get_chain_info(&naka_conf); // mine another Bitcoin block right away, since it will contain a block-commit btc_regtest_controller.bootstrap_chain(1); - wait_for(60, || { - sleep_ms(100); - let sortitions_cnt = sortitions_processed.load(Ordering::SeqCst); - Ok(sortitions_cnt > sortitions_processed_before) - }) - .unwrap(); - wait_for(120, || { - let blocks_processed = coord_channel - .lock() - .expect("Mutex poisoned") - .get_stacks_blocks_processed(); - Ok(blocks_processed > blocks_processed_before) + let info = get_chain_info(&naka_conf); + Ok(info.burn_block_height > info_before.burn_block_height + && 
info.stacks_tip_height > info_before.stacks_tip_height) }) - .expect("Timed out waiting for interim blocks to be mined"); + .unwrap(); let (canonical_stacks_tip_ch, _) = SortitionDB::get_canonical_stacks_chain_tip_hash(sortdb.conn()).unwrap(); @@ -10895,11 +10869,9 @@ fn test_tenure_extend_from_flashblocks() { // Given the above, this will be an `Extend` tenure. TEST_MINER_THREAD_STALL.set(false); - let sortitions_processed_before = sortitions_processed.load(Ordering::SeqCst); wait_for(60, || { - sleep_ms(100); - let sortitions_cnt = sortitions_processed.load(Ordering::SeqCst); - Ok(sortitions_cnt > sortitions_processed_before) + let cur_sort_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + Ok(cur_sort_tip.block_height > sort_tip.block_height) }) .unwrap(); @@ -10977,7 +10949,6 @@ fn test_tenure_extend_from_flashblocks() { // wait for the miner directive to be processed wait_for(60, || { - sleep_ms(30_000); let directives_cnt = nakamoto_miner_directives.load(Ordering::SeqCst); Ok(directives_cnt > miner_directives_before) }) @@ -10985,7 +10956,7 @@ fn test_tenure_extend_from_flashblocks() { // wait for all of the aforementioned transactions to get mined wait_for(120, || { - // fill mempool with transactions that depend on the burn view + // check account nonces from the sent transactions for (sender_sk, account_before) in account_keys.iter().zip(accounts_before.iter()) { let sender_addr = tests::to_addr(sender_sk); let account = loop { @@ -11042,28 +11013,7 @@ fn test_tenure_extend_from_flashblocks() { } // mine one additional tenure, to verify that we're on track - let commits_before = commits_submitted.load(Ordering::SeqCst); - let node_info_before = get_chain_info_opt(&naka_conf).unwrap(); - - btc_regtest_controller.bootstrap_chain(1); - - wait_for(20, || { - Ok(commits_submitted.load(Ordering::SeqCst) > commits_before) - }) - .unwrap(); - - // there was a sortition winner - let sort_tip = 
SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); - assert!(sort_tip.sortition); - - wait_for(20, || { - let node_info = get_chain_info_opt(&naka_conf).unwrap(); - Ok( - node_info.burn_block_height > node_info_before.burn_block_height - && node_info.stacks_tip_height > node_info_before.stacks_tip_height, - ) - }) - .unwrap(); + next_block_and_mine_commit(btc_regtest_controller, 60, &naka_conf, &counters).unwrap(); // boot a follower. it should reach the chain tip info!("----- BEGIN FOLLOWR BOOTUP ------"); @@ -11118,9 +11068,8 @@ fn test_tenure_extend_from_flashblocks() { debug!("Booted follower-thread"); - let miner_info = get_chain_info_result(&naka_conf).unwrap(); - wait_for(300, || { + let miner_info = get_chain_info_result(&naka_conf).unwrap(); let Ok(info) = get_chain_info_result(&follower_conf) else { sleep_ms(1000); return Ok(false); From 6fccd18d9dfeccf1035d1b48a66062e4b57f3132 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Fri, 31 Jan 2025 15:07:07 -0500 Subject: [PATCH 77/82] chore: add comments requested in code review Also add one more assertion to the test. --- stackslib/src/chainstate/nakamoto/miner.rs | 7 +++++ stackslib/src/config/mod.rs | 30 ++++++++++++++++++++-- stackslib/src/net/connection.rs | 1 + testnet/stacks-node/src/tests/signer/v0.rs | 5 ++++ 4 files changed, 41 insertions(+), 2 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/miner.rs b/stackslib/src/chainstate/nakamoto/miner.rs index 0156adbc96..d9ad1319f7 100644 --- a/stackslib/src/chainstate/nakamoto/miner.rs +++ b/stackslib/src/chainstate/nakamoto/miner.rs @@ -150,11 +150,18 @@ pub struct MinerTenureInfo<'a> { pub tenure_block_commit_opt: Option, } +/// Structure returned from `NakamotoBlockBuilder::build_nakamoto_block` with +/// information about the block that was built. 
pub struct BlockMetadata { + /// The block that was built pub block: NakamotoBlock, + /// The execution cost consumed so far by the current tenure pub tenure_consumed: ExecutionCost, + /// The cost budget for the current tenure pub tenure_budget: ExecutionCost, + /// The size of the blocks in the current tenure in bytes pub tenure_size: u64, + /// The events emitted by the transactions included in this block pub tx_events: Vec, } diff --git a/stackslib/src/config/mod.rs b/stackslib/src/config/mod.rs index 3684b16a32..2e494cacef 100644 --- a/stackslib/src/config/mod.rs +++ b/stackslib/src/config/mod.rs @@ -86,18 +86,40 @@ pub const OP_TX_ANY_ESTIM_SIZE: u64 = fmax!( OP_TX_VOTE_AGG_ESTIM_SIZE ); +/// Default maximum percentage of `satoshis_per_byte` that a Bitcoin fee rate +/// may be increased to when RBFing a transaction const DEFAULT_MAX_RBF_RATE: u64 = 150; // 1.5x +/// Amount to increment the fee by, in Sats/vByte, when RBFing a Bitcoin +/// transaction const DEFAULT_RBF_FEE_RATE_INCREMENT: u64 = 5; +/// Default number of reward cycles of blocks to sync in a non-full inventory +/// sync const INV_REWARD_CYCLES_TESTNET: u64 = 6; +/// Default minimum time to wait between mining blocks in milliseconds. The +/// value must be greater than or equal to 1000 ms because if a block is mined +/// within the same second as its parent, it will be rejected by the signers. const DEFAULT_MIN_TIME_BETWEEN_BLOCKS_MS: u64 = 1_000; +/// Default time in milliseconds to pause after receiving the first threshold +/// rejection, before proposing a new block. const DEFAULT_FIRST_REJECTION_PAUSE_MS: u64 = 5_000; +/// Default time in milliseconds to pause after receiving subsequent threshold +/// rejections, before proposing a new block. const DEFAULT_SUBSEQUENT_REJECTION_PAUSE_MS: u64 = 10_000; +/// Default time in milliseconds to wait for a Nakamoto block after seeing a +/// burnchain block before submitting a block commit. 
const DEFAULT_BLOCK_COMMIT_DELAY_MS: u64 = 20_000; +/// Default percentage of the remaining tenure cost limit to consume each block const DEFAULT_TENURE_COST_LIMIT_PER_BLOCK_PERCENTAGE: u8 = 25; +/// Default number of seconds to wait in-between polling the sortition DB to +/// see if we need to extend the ongoing tenure (e.g. because the current +/// sortition is empty or invalid). const DEFAULT_TENURE_EXTEND_POLL_SECS: u64 = 1; - -// This should be greater than the signers' timeout. This is used for issuing fallback tenure extends +/// Default duration to wait before attempting to issue a tenure extend. +/// This should be greater than the signers' timeout. This is used for issuing +/// fallback tenure extends const DEFAULT_TENURE_TIMEOUT_SECS: u64 = 180; +/// Default percentage of block budget that must be used before attempting a +/// time-based tenure extend const DEFAULT_TENURE_EXTEND_COST_THRESHOLD: u64 = 50; static HELIUM_DEFAULT_CONNECTION_OPTIONS: LazyLock = @@ -1192,9 +1214,13 @@ pub struct BurnchainConfig { pub process_exit_at_block_height: Option, pub poll_time_secs: u64, pub satoshis_per_byte: u64, + /// Maximum percentage of `satoshis_per_byte` that a Bitcoin fee rate may + /// be increased to when RBFing a transaction pub max_rbf: u64, pub leader_key_tx_estimated_size: u64, pub block_commit_tx_estimated_size: u64, + /// Amount to increment the fee by, in Sats/vByte, when RBFing a Bitcoin + /// transaction pub rbf_fee_increment: u64, pub first_burn_block_height: Option, pub first_burn_block_timestamp: Option, diff --git a/stackslib/src/net/connection.rs b/stackslib/src/net/connection.rs index 09465721ba..1204bad7fd 100644 --- a/stackslib/src/net/connection.rs +++ b/stackslib/src/net/connection.rs @@ -379,6 +379,7 @@ pub struct ConnectionOptions { /// Units are milliseconds. A value of 0 means "never". 
pub log_neighbors_freq: u64, pub inv_sync_interval: u64, + /// how many reward cycles of blocks to sync in a non-full inventory sync pub inv_reward_cycles: u64, pub download_interval: u64, pub pingback_timeout: u64, diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index 62391f14af..63154a9a57 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -12683,6 +12683,11 @@ fn tenure_extend_cost_threshold() { }) .expect("Contract not included in block"); + // Ensure the tenure was not extended in that block + assert!(!last_block_contains_tenure_change_tx( + TenureChangeCause::Extended + )); + // Now, lets call the contract a bunch of times to increase the tenure cost for _ in 0..num_txs { let call_tx = make_contract_call( From 6041be6784e0b3576370428a265dab9521d54be9 Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Fri, 31 Jan 2025 16:04:03 -0500 Subject: [PATCH 78/82] fix: Disable flaky test `flash_blocks_on_epoch_3` for real this time --- .github/workflows/bitcoin-tests.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index 57b37f44e7..2f1ea4f219 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -84,7 +84,7 @@ jobs: - tests::should_succeed_handling_malformed_and_valid_txs - tests::nakamoto_integrations::simple_neon_integration # Disable this flaky test.
We don't need continue testing Epoch 2 -> 3 transition - - tests::nakamoto_integrations::flash_blocks_on_epoch_3_FLAKY + # - tests::nakamoto_integrations::flash_blocks_on_epoch_3_FLAKY - tests::nakamoto_integrations::mine_multiple_per_tenure_integration - tests::nakamoto_integrations::block_proposal_api_endpoint - tests::nakamoto_integrations::miner_writes_proposed_block_to_stackerdb From 8d2bb0d80db3210a44730d3252ffe0197a2e8f37 Mon Sep 17 00:00:00 2001 From: Roberto De Ioris Date: Sat, 1 Feb 2025 10:54:30 +0100 Subject: [PATCH 79/82] refactored block_validation_check_rejection_timeout_heuristic test --- testnet/stacks-node/src/tests/signer/v0.rs | 183 ++++----------------- 1 file changed, 33 insertions(+), 150 deletions(-) diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index e055ad82f2..26f95d9ce0 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -7947,166 +7947,49 @@ fn block_validation_check_rejection_timeout_heuristic() { // note we just use mined nakamoto_blocks as the second block is not going to be confirmed - info!("------------------------- Check Rejections-based timeout with 1 rejection -------------------------"); + let mut test_rejections = |signer_split_index: usize, expected_timeout: u64| { + let blocks_before = test_observer::get_mined_nakamoto_blocks().len(); + let (ignore_signers, reject_signers) = all_signers.split_at(signer_split_index); - let blocks_before = test_observer::get_mined_nakamoto_blocks().len(); + info!("------------------------- Check Rejections-based timeout with {} rejections -------------------------", reject_signers.len()); - TEST_REJECT_ALL_BLOCK_PROPOSAL.set(vec![all_signers[19]]); - TEST_IGNORE_ALL_BLOCK_PROPOSALS.set(all_signers[0..19].to_vec()); + TEST_REJECT_ALL_BLOCK_PROPOSAL.set(reject_signers.to_vec()); + TEST_IGNORE_ALL_BLOCK_PROPOSALS.set(ignore_signers.to_vec()); - next_block_and( - &mut 
signer_test.running_nodes.btc_regtest_controller, - 30, - || Ok(test_observer::get_mined_nakamoto_blocks().len() > blocks_before), - ) - .unwrap(); - - signer_test - .wait_for_block_rejections(timeout.as_secs(), &[all_signers[19]]) - .unwrap(); - - wait_for(60, || { - Ok(signer_test - .running_nodes - .counters - .naka_miner_current_rejections - .get() - >= 1) - }) - .unwrap(); - assert_eq!( - signer_test - .running_nodes - .counters - .naka_miner_current_rejections_timeout_secs - .get(), - 123 - ); - - info!("------------------------- Check Rejections-based timeout with 2 rejections -------------------------"); - - let blocks_before = test_observer::get_mined_nakamoto_blocks().len(); - - TEST_REJECT_ALL_BLOCK_PROPOSAL.set(vec![all_signers[18], all_signers[19]]); - TEST_IGNORE_ALL_BLOCK_PROPOSALS.set(all_signers[0..18].to_vec()); - - next_block_and( - &mut signer_test.running_nodes.btc_regtest_controller, - 30, - || Ok(test_observer::get_mined_nakamoto_blocks().len() > blocks_before), - ) - .unwrap(); - - signer_test - .wait_for_block_rejections(timeout.as_secs(), &[all_signers[18], all_signers[19]]) - .unwrap(); - - wait_for(60, || { - Ok(signer_test - .running_nodes - .counters - .naka_miner_current_rejections - .get() - >= 2) - }) - .unwrap(); - assert_eq!( - signer_test - .running_nodes - .counters - .naka_miner_current_rejections_timeout_secs - .get(), - 20 - ); - - info!("------------------------- Check Rejections-based timeout with 3 rejections -------------------------"); - - let blocks_before = test_observer::get_mined_nakamoto_blocks().len(); - - TEST_REJECT_ALL_BLOCK_PROPOSAL.set(vec![all_signers[17], all_signers[18], all_signers[19]]); - TEST_IGNORE_ALL_BLOCK_PROPOSALS.set(all_signers[0..17].to_vec()); - - next_block_and( - &mut signer_test.running_nodes.btc_regtest_controller, - 30, - || Ok(test_observer::get_mined_nakamoto_blocks().len() > blocks_before), - ) - .unwrap(); - - signer_test - .wait_for_block_rejections( - timeout.as_secs(), - 
&[all_signers[17], all_signers[18], all_signers[19]], + next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + 30, + || Ok(test_observer::get_mined_nakamoto_blocks().len() > blocks_before), ) .unwrap(); - wait_for(60, || { - Ok(signer_test - .running_nodes - .counters - .naka_miner_current_rejections - .get() - >= 3) - }) - .unwrap(); - - assert_eq!( signer_test - .running_nodes - .counters - .naka_miner_current_rejections_timeout_secs - .get(), - 10 - ); - - info!("------------------------- Check Rejections-based timeout with 4 rejections -------------------------"); - - let blocks_before = test_observer::get_mined_nakamoto_blocks().len(); - - TEST_REJECT_ALL_BLOCK_PROPOSAL.set(vec![ - all_signers[16], - all_signers[17], - all_signers[18], - all_signers[19], - ]); - TEST_IGNORE_ALL_BLOCK_PROPOSALS.set(all_signers[0..16].to_vec()); - - next_block_and( - &mut signer_test.running_nodes.btc_regtest_controller, - 30, - || Ok(test_observer::get_mined_nakamoto_blocks().len() > blocks_before), - ) - .unwrap(); + .wait_for_block_rejections(timeout.as_secs(), &reject_signers) + .unwrap(); - signer_test - .wait_for_block_rejections( - timeout.as_secs(), - &[ - all_signers[16], - all_signers[17], - all_signers[18], - all_signers[19], - ], - ) + wait_for(60, || { + Ok(signer_test + .running_nodes + .counters + .naka_miner_current_rejections + .get() + >= reject_signers.len() as u64) + }) .unwrap(); + assert_eq!( + signer_test + .running_nodes + .counters + .naka_miner_current_rejections_timeout_secs + .get(), + expected_timeout + ); + }; - wait_for(60, || { - Ok(signer_test - .running_nodes - .counters - .naka_miner_current_rejections - .get() - >= 4) - }) - .unwrap(); - assert_eq!( - signer_test - .running_nodes - .counters - .naka_miner_current_rejections_timeout_secs - .get(), - 99 - ); + test_rejections(19, 123); + test_rejections(18, 20); + test_rejections(17, 10); + test_rejections(16, 99); // reset reject/ignore 
TEST_REJECT_ALL_BLOCK_PROPOSAL.set(vec![]); From 151c844734122174e75a0f801d2d0f28c4085a08 Mon Sep 17 00:00:00 2001 From: Roberto De Ioris Date: Sat, 1 Feb 2025 11:00:18 +0100 Subject: [PATCH 80/82] use From based cast for u32->u64 --- testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs index 97b22ef68e..ff08633ac6 100644 --- a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs @@ -334,7 +334,7 @@ impl SignerCoordinator { return false; } // number or rejections changed? - if status.total_reject_weight as u64 != rejections { + if u64::from(status.total_reject_weight) != rejections { return false; } // enough signatures? @@ -388,8 +388,8 @@ impl SignerCoordinator { } }; - if rejections != block_status.total_reject_weight as u64 { - rejections = block_status.total_reject_weight as u64; + if rejections != u64::from(block_status.total_reject_weight) { + rejections = u64::from(block_status.total_reject_weight); let (rejections_step, new_rejections_timeout) = self .block_rejection_timeout_steps .range((Included(0), Included(rejections))) From f66c8944082a0040b3bb399a9f676a9f07497dff Mon Sep 17 00:00:00 2001 From: Roberto De Ioris Date: Sat, 1 Feb 2025 11:08:20 +0100 Subject: [PATCH 81/82] removed useless line --- testnet/stacks-node/src/tests/signer/mod.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/testnet/stacks-node/src/tests/signer/mod.rs b/testnet/stacks-node/src/tests/signer/mod.rs index 5f41ff816a..6b355fe5aa 100644 --- a/testnet/stacks-node/src/tests/signer/mod.rs +++ b/testnet/stacks-node/src/tests/signer/mod.rs @@ -936,7 +936,6 @@ fn setup_stx_btc_node( let counters = run_loop.counters(); let coord_channel = run_loop.coordinator_channels(); - let run_loop_thread = thread::spawn(move || 
run_loop.start(None, 0)); // Give the run loop some time to start up! From 400c806aaa812d4565b94d57be60527040fb31e0 Mon Sep 17 00:00:00 2001 From: Roberto De Ioris Date: Mon, 3 Feb 2025 08:26:54 +0100 Subject: [PATCH 82/82] use u32 for rejections counter --- .../src/nakamoto_node/signer_coordinator.rs | 16 ++++++++-------- testnet/stacks-node/src/run_loop/neon.rs | 6 +++--- 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs index ff08633ac6..2138b7e767 100644 --- a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs @@ -70,7 +70,7 @@ pub struct SignerCoordinator { /// burn block has arrived since this thread started. burn_tip_at_start: ConsensusHash, /// The timeout configuration based on the percentage of rejections - block_rejection_timeout_steps: BTreeMap, + block_rejection_timeout_steps: BTreeMap, } impl SignerCoordinator { @@ -107,10 +107,10 @@ impl SignerCoordinator { let miners_session = StackerDBSession::new(&rpc_socket.to_string(), miners_contract_id); // build a BTreeMap of the various timeout steps - let mut block_rejection_timeout_steps = BTreeMap::::new(); + let mut block_rejection_timeout_steps = BTreeMap::::new(); for (percentage, duration) in config.miner.block_rejection_timeout_steps.iter() { let rejections_amount = - ((f64::from(listener.total_weight) / 100.0) * f64::from(*percentage)) as u64; + ((f64::from(listener.total_weight) / 100.0) * f64::from(*percentage)) as u32; block_rejection_timeout_steps.insert(rejections_amount, *duration); } @@ -308,7 +308,7 @@ impl SignerCoordinator { counters: &Counters, ) -> Result, NakamotoNodeError> { // the amount of current rejections (used to eventually modify the timeout) - let mut rejections: u64 = 0; + let mut rejections: u32 = 0; // default timeout (the 0 entry must be always present) let mut 
rejections_timeout = self .block_rejection_timeout_steps @@ -334,7 +334,7 @@ impl SignerCoordinator { return false; } // number or rejections changed? - if u64::from(status.total_reject_weight) != rejections { + if status.total_reject_weight != rejections { return false; } // enough signatures? @@ -388,8 +388,8 @@ impl SignerCoordinator { } }; - if rejections != u64::from(block_status.total_reject_weight) { - rejections = u64::from(block_status.total_reject_weight); + if rejections != block_status.total_reject_weight { + rejections = block_status.total_reject_weight; let (rejections_step, new_rejections_timeout) = self .block_rejection_timeout_steps .range((Included(0), Included(rejections))) @@ -406,7 +406,7 @@ impl SignerCoordinator { "rejections_step" => rejections_step, "rejections_threshold" => self.total_weight.saturating_sub(self.weight_threshold)); - counters.set_miner_current_rejections_timeout(rejections_timeout.as_secs()); + counters.set_miner_current_rejections_timeout_secs(rejections_timeout.as_secs()); counters.set_miner_current_rejections(rejections); } diff --git a/testnet/stacks-node/src/run_loop/neon.rs b/testnet/stacks-node/src/run_loop/neon.rs index 3ef3e45ccb..299335f35f 100644 --- a/testnet/stacks-node/src/run_loop/neon.rs +++ b/testnet/stacks-node/src/run_loop/neon.rs @@ -218,12 +218,12 @@ impl Counters { Counters::set(&self.microblocks_processed, value) } - pub fn set_miner_current_rejections_timeout(&self, value: u64) { + pub fn set_miner_current_rejections_timeout_secs(&self, value: u64) { Counters::set(&self.naka_miner_current_rejections_timeout_secs, value) } - pub fn set_miner_current_rejections(&self, value: u64) { - Counters::set(&self.naka_miner_current_rejections, value) + pub fn set_miner_current_rejections(&self, value: u32) { + Counters::set(&self.naka_miner_current_rejections, u64::from(value)) } }