From 8351ae292f7441c2d4f508a57c9f417fc3e8f32f Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Wed, 1 Jan 2025 22:56:04 -0500 Subject: [PATCH 01/13] chore(stacks-common): Remove unused imports and enable warning --- stacks-common/src/address/b58.rs | 2 +- stacks-common/src/address/mod.rs | 3 +-- stacks-common/src/bitvec.rs | 1 - .../src/deps_common/bitcoin/blockdata/block.rs | 1 - .../src/deps_common/bitcoin/blockdata/script.rs | 1 - .../src/deps_common/bitcoin/blockdata/transaction.rs | 4 +--- .../deps_common/bitcoin/network/message_network.rs | 2 -- stacks-common/src/deps_common/bitcoin/util/hash.rs | 2 +- stacks-common/src/deps_common/httparse/mod.rs | 4 +--- stacks-common/src/libcommon.rs | 4 +--- stacks-common/src/types/chainstate.rs | 12 +++++------- stacks-common/src/types/mod.rs | 1 - stacks-common/src/util/chunked_encoding.rs | 3 ++- stacks-common/src/util/db.rs | 2 +- stacks-common/src/util/hash.rs | 7 ++----- stacks-common/src/util/log.rs | 3 +-- stacks-common/src/util/mod.rs | 1 - stacks-common/src/util/pipe.rs | 4 ---- stacks-common/src/util/retry.rs | 6 +----- stacks-common/src/util/secp256k1.rs | 6 ++---- stacks-common/src/util/vrf.rs | 7 ------- 21 files changed, 20 insertions(+), 56 deletions(-) diff --git a/stacks-common/src/address/b58.rs b/stacks-common/src/address/b58.rs index 6a135392e5..ffba441de6 100644 --- a/stacks-common/src/address/b58.rs +++ b/stacks-common/src/address/b58.rs @@ -14,7 +14,7 @@ //! Base58 encoder and decoder -use std::{error, fmt, str}; +use std::{fmt, str}; use crate::address::Error; use crate::util::hash::DoubleSha256; diff --git a/stacks-common/src/address/mod.rs b/stacks-common/src/address/mod.rs index 381456f661..8377d0087d 100644 --- a/stacks-common/src/address/mod.rs +++ b/stacks-common/src/address/mod.rs @@ -19,7 +19,7 @@ use std::{error, fmt}; use sha2::{Digest, Sha256}; use crate::deps_common::bitcoin::blockdata::opcodes::All as btc_opcodes; -use crate::deps_common::bitcoin::blockdata::script::{Builder, Instruction, Script}; +use crate::deps_common::bitcoin::blockdata::script::Builder; use crate::types::PublicKey; use crate::util::hash::Hash160; @@ -220,7 +220,6 @@ pub fn public_keys_to_address_hash<K: PublicKey>( mod test { use super::*; use crate::util::hash::*; - use crate::util::log; use crate::util::secp256k1::Secp256k1PublicKey as PubKey; struct PubkeyFixture { diff --git a/stacks-common/src/bitvec.rs b/stacks-common/src/bitvec.rs index 7c77e5da32..065dd5e814 100644 --- a/stacks-common/src/bitvec.rs +++ b/stacks-common/src/bitvec.rs @@ -260,7 +260,6 @@ mod test { use super::BitVec; use crate::codec::StacksMessageCodec; - use crate::util::hash::to_hex; fn check_set_get(mut input: BitVec<{ u16::MAX }>) { let original_input = input.clone(); diff --git a/stacks-common/src/deps_common/bitcoin/blockdata/block.rs b/stacks-common/src/deps_common/bitcoin/blockdata/block.rs index af064511b5..9a797fd846 100644 --- a/stacks-common/src/deps_common/bitcoin/blockdata/block.rs +++ b/stacks-common/src/deps_common/bitcoin/blockdata/block.rs @@ -25,7 +25,6 @@ use crate::deps_common::bitcoin::blockdata::transaction::Transaction; use crate::deps_common::bitcoin::network::constants::Network; use crate::deps_common::bitcoin::network::encodable::VarInt; use crate::deps_common::bitcoin::network::serialize::BitcoinHash; -use crate::deps_common::bitcoin::util; use crate::deps_common::bitcoin::util::hash::Sha256dHash; use crate::deps_common::bitcoin::util::Error; use crate::deps_common::bitcoin::util::Error::{SpvBadProofOfWork, SpvBadTarget}; diff --git 
a/stacks-common/src/deps_common/bitcoin/blockdata/script.rs b/stacks-common/src/deps_common/bitcoin/blockdata/script.rs index 34ee5897c3..cf0e3296b1 100644 --- a/stacks-common/src/deps_common/bitcoin/blockdata/script.rs +++ b/stacks-common/src/deps_common/bitcoin/blockdata/script.rs @@ -27,7 +27,6 @@ use std::mem::size_of; use std::{error, fmt}; -use serde; use sha2::{Digest, Sha256}; use crate::deps_common::bitcoin::blockdata::opcodes; diff --git a/stacks-common/src/deps_common/bitcoin/blockdata/transaction.rs b/stacks-common/src/deps_common/bitcoin/blockdata/transaction.rs index c2d4c4e0a2..6dbf49bd5d 100644 --- a/stacks-common/src/deps_common/bitcoin/blockdata/transaction.rs +++ b/stacks-common/src/deps_common/bitcoin/blockdata/transaction.rs @@ -34,7 +34,6 @@ use crate::deps_common::bitcoin::network::serialize::{ self, serialize, BitcoinHash, SimpleDecoder, SimpleEncoder, }; use crate::deps_common::bitcoin::util::hash::Sha256dHash; -use crate::util::hash::to_hex; /// A reference to a transaction output #[derive(Copy, Clone, Debug, Eq, Hash, PartialEq, PartialOrd, Ord)] @@ -675,7 +674,7 @@ impl SigHashType { #[cfg(test)] mod tests { - use super::{SigHashType, Transaction, TxIn}; + use super::{Transaction, TxIn}; use crate::deps_common; use crate::deps_common::bitcoin::blockdata::script::Script; use crate::deps_common::bitcoin::network::serialize::{deserialize, BitcoinHash}; @@ -690,7 +689,6 @@ mod tests { #[test] fn test_is_coinbase() { - use crate::deps_common::bitcoin::blockdata::constants; use crate::deps_common::bitcoin::network::constants::Network; let genesis = deps_common::bitcoin::blockdata::constants::genesis_block(Network::Bitcoin); diff --git a/stacks-common/src/deps_common/bitcoin/network/message_network.rs b/stacks-common/src/deps_common/bitcoin/network/message_network.rs index 0cf486ba85..a42eb47aea 100644 --- a/stacks-common/src/deps_common/bitcoin/network/message_network.rs +++ b/stacks-common/src/deps_common/bitcoin/network/message_network.rs @@ -19,8 +19,6 @@ //! use crate::deps_common::bitcoin::network::address::Address; -use crate::deps_common::bitcoin::network::constants; -use crate::util; // Some simple messages diff --git a/stacks-common/src/deps_common/bitcoin/util/hash.rs b/stacks-common/src/deps_common/bitcoin/util/hash.rs index e1a9455e99..abfce8349f 100644 --- a/stacks-common/src/deps_common/bitcoin/util/hash.rs +++ b/stacks-common/src/deps_common/bitcoin/util/hash.rs @@ -18,7 +18,7 @@ use std::char::from_digit; use std::cmp::min; use std::io::{Cursor, Write}; -use std::{error, fmt, mem}; +use std::{fmt, mem}; use ripemd::Ripemd160; #[cfg(feature = "serde")] diff --git a/stacks-common/src/deps_common/httparse/mod.rs b/stacks-common/src/deps_common/httparse/mod.rs index b4c9250546..364fe0f8a7 100644 --- a/stacks-common/src/deps_common/httparse/mod.rs +++ b/stacks-common/src/deps_common/httparse/mod.rs @@ -30,7 +30,7 @@ //! Originally written by Sean McArthur. //! //! Modified by Jude Nelson to remove all unsafe code. -use std::{error, fmt, mem, result, str}; +use std::{fmt, mem, result, str}; macro_rules! 
next { ($bytes:ident) => {{ @@ -1282,8 +1282,6 @@ mod tests { #[test] fn test_std_error() { - use std::error::Error as StdError; - use super::Error; let err = Error::HeaderName; assert_eq!(err.to_string(), err.description_str()); diff --git a/stacks-common/src/libcommon.rs b/stacks-common/src/libcommon.rs index 04c3acc1ea..34705bebda 100644 --- a/stacks-common/src/libcommon.rs +++ b/stacks-common/src/libcommon.rs @@ -1,5 +1,4 @@ #![allow(unused_macros)] -#![allow(unused_imports)] #![allow(dead_code)] #![allow(non_camel_case_types)] #![allow(non_snake_case)] @@ -7,6 +6,7 @@ #![cfg_attr(test, allow(unused_variables, unused_assignments))] #![allow(clippy::assertions_on_constants)] +#[allow(unused_imports)] #[macro_use(o, slog_log, slog_trace, slog_debug, slog_info, slog_warn, slog_error)] extern crate slog; @@ -33,8 +33,6 @@ pub mod deps_common; pub mod bitvec; -use crate::types::chainstate::{BlockHeaderHash, BurnchainHeaderHash, SortitionId, StacksBlockId}; - pub mod consts { use crate::types::chainstate::{BlockHeaderHash, ConsensusHash}; pub use crate::types::MINING_COMMITMENT_WINDOW; diff --git a/stacks-common/src/types/chainstate.rs b/stacks-common/src/types/chainstate.rs index 630ce70c9d..e07e37f27a 100644 --- a/stacks-common/src/types/chainstate.rs +++ b/stacks-common/src/types/chainstate.rs @@ -2,24 +2,22 @@ use std::fmt::{self, Display}; use std::io::{Read, Write}; use std::str::FromStr; -use curve25519_dalek::digest::Digest; -use rand::{Rng, SeedableRng}; -use serde::de::{Deserialize, Error as de_Error}; -use serde::ser::Error as ser_Error; use serde::Serialize; -use sha2::{Digest as Sha2Digest, Sha256, Sha512_256}; +use sha2::{Digest as Sha2Digest, Sha512_256}; use crate::codec::{read_next, write_next, Error as CodecError, StacksMessageCodec}; use crate::consts::{FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH}; use crate::deps_common::bitcoin::util::hash::Sha256dHash; -use crate::util::hash::{to_hex, DoubleSha256, Hash160, Sha512Trunc256Sum, HASH160_ENCODED_SIZE}; +use crate::util::hash::{Hash160, Sha512Trunc256Sum, HASH160_ENCODED_SIZE}; use crate::util::secp256k1::{MessageSignature, Secp256k1PrivateKey, Secp256k1PublicKey}; -use crate::util::uint::Uint256; use crate::util::vrf::{VRFProof, VRF_PROOF_ENCODED_SIZE}; pub type StacksPublicKey = Secp256k1PublicKey; pub type StacksPrivateKey = Secp256k1PrivateKey; +#[cfg(any(test, feature = "testing"))] +use crate::util::hash::DoubleSha256; + /// Hash of a Trie node. This is a SHA2-512/256. 
#[derive(Default)] pub struct TrieHash(pub [u8; 32]); diff --git a/stacks-common/src/types/mod.rs b/stacks-common/src/types/mod.rs index 93ebd17bc0..297d629254 100644 --- a/stacks-common/src/types/mod.rs +++ b/stacks-common/src/types/mod.rs @@ -30,7 +30,6 @@ use crate::address::{ C32_ADDRESS_VERSION_TESTNET_MULTISIG, C32_ADDRESS_VERSION_TESTNET_SINGLESIG, }; use crate::consts::MICROSTACKS_PER_STACKS; -use crate::deps_common::bitcoin::blockdata::transaction::TxOut; use crate::types::chainstate::{StacksAddress, StacksPublicKey}; use crate::util::hash::Hash160; use crate::util::secp256k1::{MessageSignature, Secp256k1PublicKey}; diff --git a/stacks-common/src/util/chunked_encoding.rs b/stacks-common/src/util/chunked_encoding.rs index 445ec5a831..328f94514f 100644 --- a/stacks-common/src/util/chunked_encoding.rs +++ b/stacks-common/src/util/chunked_encoding.rs @@ -445,9 +445,10 @@ impl<W: Write> Write for HttpChunkedTransferWriter<'_, '_, W> { } } +#[cfg(test)] mod test { use std::io; - use std::io::{Read, Write}; + use std::io::Read; use rand::RngCore; diff --git a/stacks-common/src/util/db.rs b/stacks-common/src/util/db.rs index 53564af597..3a463df4f8 100644 --- a/stacks-common/src/util/db.rs +++ b/stacks-common/src/util/db.rs @@ -17,7 +17,7 @@ use std::backtrace::Backtrace; use std::sync::{LazyLock, Mutex}; use std::thread; -use std::time::{Duration, Instant}; +use std::time::Instant; use hashbrown::HashMap; use rand::{thread_rng, Rng}; diff --git a/stacks-common/src/util/hash.rs b/stacks-common/src/util/hash.rs index 666e72c8e2..85f357d21a 100644 --- a/stacks-common/src/util/hash.rs +++ b/stacks-common/src/util/hash.rs @@ -20,7 +20,6 @@ use std::{fmt, mem}; use ripemd::Ripemd160; use serde::de::{Deserialize, Error as de_Error}; -use serde::ser::Error as ser_Error; use serde::Serialize; use sha2::{Digest, Sha256, Sha512, Sha512_256}; use sha3::Keccak256; @@ -29,7 +28,7 @@ use crate::types::StacksPublicKeyBuffer; use crate::util::pair::*; use crate::util::secp256k1::Secp256k1PublicKey; use crate::util::uint::Uint256; -use crate::util::{log, HexError}; +use crate::util::HexError; // hash function for Merkle trees pub trait MerkleHashFunc { @@ -659,9 +658,7 @@ pub fn bytes_to_hex(s: &[u8]) -> String { #[cfg(test)] mod test { - use super::{ - bin_bytes, hex_bytes, to_bin, DoubleSha256, MerkleHashFunc, MerklePath, MerkleTree, - }; + use super::{bin_bytes, hex_bytes, to_bin, DoubleSha256, MerkleHashFunc, MerkleTree}; struct MerkleTreeFixture { data: Vec<Vec<u8>>, diff --git a/stacks-common/src/util/log.rs b/stacks-common/src/util/log.rs index b0ac704f0c..1699aa6f54 100644 --- a/stacks-common/src/util/log.rs +++ b/stacks-common/src/util/log.rs @@ -15,13 +15,12 @@ // along with this program. If not, see <http://www.gnu.org/licenses/>. use std::io::Write; -use std::sync::Mutex; use std::time::{Duration, SystemTime}; use std::{env, io, thread}; use chrono::prelude::*; use lazy_static::lazy_static; -use slog::{BorrowedKV, Drain, FnValue, Level, Logger, OwnedKVList, Record, KV}; +use slog::{Drain, Level, Logger, OwnedKVList, Record, KV}; use slog_term::{CountingWriter, Decorator, RecordDecorator, Serializer}; lazy_static!
{ diff --git a/stacks-common/src/util/mod.rs b/stacks-common/src/util/mod.rs index 8fc9499b2f..0e9ff49cca 100644 --- a/stacks-common/src/util/mod.rs +++ b/stacks-common/src/util/mod.rs @@ -28,7 +28,6 @@ pub mod secp256k1; pub mod uint; pub mod vrf; -use std::collections::HashMap; use std::fs::File; use std::io::{BufReader, BufWriter, Write}; use std::path::{Path, PathBuf}; diff --git a/stacks-common/src/util/pipe.rs b/stacks-common/src/util/pipe.rs index 86d92abd61..4407fee71f 100644 --- a/stacks-common/src/util/pipe.rs +++ b/stacks-common/src/util/pipe.rs @@ -21,8 +21,6 @@ use std::io; use std::io::{Read, Write}; use std::sync::mpsc::{sync_channel, Receiver, SyncSender, TryRecvError, TrySendError}; -use crate::util::log; - /// Inter-thread pipe for streaming messages, built on channels. /// Used mainly in conjunction with networking. /// @@ -316,7 +314,6 @@ impl Write for PipeWrite { #[cfg(test)] mod test { - use std::io::prelude::*; use std::io::{Read, Write}; use std::{io, thread}; @@ -324,7 +321,6 @@ mod test { use rand::RngCore; use super::*; - use crate::util::*; #[test] fn test_connection_pipe_oneshot() { diff --git a/stacks-common/src/util/retry.rs b/stacks-common/src/util/retry.rs index e7f6c0b140..47801289a3 100644 --- a/stacks-common/src/util/retry.rs +++ b/stacks-common/src/util/retry.rs @@ -18,11 +18,7 @@ */ use std::io; -use std::io::prelude::*; -use std::io::{Read, Write}; - -use crate::util::hash::to_hex; -use crate::util::log; +use std::io::Read; /// Wrap a Read so that we store a copy of what was read. /// Used for re-trying reads when we don't know what to expect from the stream. diff --git a/stacks-common/src/util/secp256k1.rs b/stacks-common/src/util/secp256k1.rs index 5c64838855..353c58c428 100644 --- a/stacks-common/src/util/secp256k1.rs +++ b/stacks-common/src/util/secp256k1.rs @@ -13,7 +13,7 @@ // // You should have received a copy of the GNU General Public License // along with this program. If not, see <http://www.gnu.org/licenses/>. -use rand::{thread_rng, RngCore}; +use rand::RngCore; use secp256k1; use secp256k1::ecdsa::{ RecoverableSignature as LibSecp256k1RecoverableSignature, RecoveryId as LibSecp256k1RecoveryID, @@ -24,11 +24,9 @@ use secp256k1::{ PublicKey as LibSecp256k1PublicKey, Secp256k1, SecretKey as LibSecp256k1PrivateKey, }; use serde::de::{Deserialize, Error as de_Error}; -use serde::ser::Error as ser_Error; use serde::Serialize; use super::hash::Sha256Sum; -use crate::impl_byte_array_message_codec; use crate::types::{PrivateKey, PublicKey}; use crate::util::hash::{hex_bytes, to_hex}; @@ -442,8 +440,8 @@ mod tests { use secp256k1::{PublicKey as LibSecp256k1PublicKey, Secp256k1}; use super::*; + use crate::util::get_epoch_time_ms; use crate::util::hash::hex_bytes; - use crate::util::{get_epoch_time_ms, log}; struct KeyFixture<I, R> { input: I, diff --git a/stacks-common/src/util/vrf.rs b/stacks-common/src/util/vrf.rs index 0c2b2c3dad..5c7439daf9 100644 --- a/stacks-common/src/util/vrf.rs +++ b/stacks-common/src/util/vrf.rs @@ -22,16 +22,11 @@ use std::fmt::Debug; use std::hash::{Hash, Hasher}; /// This codebase is based on routines defined in the IETF draft for verifiable random functions /// over elliptic curves (https://tools.ietf.org/id/draft-irtf-cfrg-vrf-02.html). 
-use std::ops::Deref; -use std::ops::DerefMut; use std::{error, fmt}; use curve25519_dalek::constants::ED25519_BASEPOINT_POINT; use curve25519_dalek::edwards::{CompressedEdwardsY, EdwardsPoint}; use curve25519_dalek::scalar::Scalar as ed25519_Scalar; -use ed25519_dalek::{ - SecretKey as EdDalekSecretKeyBytes, SigningKey as EdPrivateKey, VerifyingKey as EdPublicKey, -}; use rand; use sha2::{Digest, Sha512}; @@ -535,10 +530,8 @@ impl VRF { #[cfg(test)] mod tests { - use curve25519_dalek::scalar::Scalar as ed25519_Scalar; use rand; use rand::RngCore; - use sha2::Sha512; use super::*; use crate::util::hash::hex_bytes; From 7a15524b4955dc3c79123b29641866d5b299f57c Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Wed, 1 Jan 2025 23:10:32 -0500 Subject: [PATCH 02/13] chore: Move feature-gated import inside function --- stacks-common/src/types/chainstate.rs | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/stacks-common/src/types/chainstate.rs b/stacks-common/src/types/chainstate.rs index e07e37f27a..89b244ae42 100644 --- a/stacks-common/src/types/chainstate.rs +++ b/stacks-common/src/types/chainstate.rs @@ -15,9 +15,6 @@ use crate::util::vrf::{VRFProof, VRF_PROOF_ENCODED_SIZE}; pub type StacksPublicKey = Secp256k1PublicKey; pub type StacksPrivateKey = Secp256k1PrivateKey; -#[cfg(any(test, feature = "testing"))] -use crate::util::hash::DoubleSha256; - /// Hash of a Trie node. This is a SHA2-512/256. #[derive(Default)] pub struct TrieHash(pub [u8; 32]); @@ -450,6 +447,8 @@ impl BurnchainHeaderHash { index_root: &TrieHash, noise: u64, ) -> BurnchainHeaderHash { + use crate::util::hash::DoubleSha256; + let mut bytes = vec![]; bytes.extend_from_slice(&block_height.to_be_bytes()); bytes.extend_from_slice(index_root.as_bytes()); From 8d335291c39c51c324cbc0ad47c45f05e3598b23 Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Thu, 2 Jan 2025 15:59:52 -0500 Subject: [PATCH 03/13] fix: Feature flag `slog_json` working again --- stacks-common/src/util/log.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/stacks-common/src/util/log.rs b/stacks-common/src/util/log.rs index 1699aa6f54..77a4950f81 100644 --- a/stacks-common/src/util/log.rs +++ b/stacks-common/src/util/log.rs @@ -190,6 +190,10 @@ impl TermFormat { #[cfg(feature = "slog_json")] fn make_json_logger() -> Logger { + use std::sync::Mutex; + + use slog::FnValue; + let def_keys = o!("file" => FnValue(move |info| { info.file() }), From 10e6c6d639eecd004311d0b699ee272e9fa2c59c Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Fri, 3 Jan 2025 11:49:47 -0500 Subject: [PATCH 04/13] fix(stacks-common): Remove remaining unused imports --- stacks-common/src/types/chainstate.rs | 2 +- stacks-common/src/types/mod.rs | 1 - stacks-common/src/util/mod.rs | 4 ++-- 3 files changed, 3 insertions(+), 4 deletions(-) diff --git a/stacks-common/src/types/chainstate.rs b/stacks-common/src/types/chainstate.rs index 89b244ae42..b7ecae66c4 100644 --- a/stacks-common/src/types/chainstate.rs +++ b/stacks-common/src/types/chainstate.rs @@ -1,4 +1,4 @@ -use std::fmt::{self, Display}; +use std::fmt; use std::io::{Read, Write}; use std::str::FromStr; diff --git a/stacks-common/src/types/mod.rs b/stacks-common/src/types/mod.rs index 297d629254..acb838f9c8 100644 --- a/stacks-common/src/types/mod.rs +++ b/stacks-common/src/types/mod.rs @@ -14,7 +14,6 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see <http://www.gnu.org/licenses/>. 
-use std::cell::LazyCell; use std::cmp::Ordering; use std::fmt; use std::ops::{Deref, DerefMut, Index, IndexMut}; diff --git a/stacks-common/src/util/mod.rs b/stacks-common/src/util/mod.rs index 0e9ff49cca..46158d2f4f 100644 --- a/stacks-common/src/util/mod.rs +++ b/stacks-common/src/util/mod.rs @@ -30,13 +30,13 @@ pub mod vrf; use std::fs::File; use std::io::{BufReader, BufWriter, Write}; -use std::path::{Path, PathBuf}; +use std::path::Path; use std::time::{SystemTime, UNIX_EPOCH}; use std::{error, fmt, thread, time}; /// Given a relative path inside the Cargo workspace, return the absolute path #[cfg(any(test, feature = "testing"))] -pub fn cargo_workspace<P>(relative_path: P) -> PathBuf +pub fn cargo_workspace<P>(relative_path: P) -> std::path::PathBuf where P: AsRef<Path>, { From a02c9a60f5264ea82f571af150a594a6cdf23161 Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Fri, 3 Jan 2025 17:36:04 -0500 Subject: [PATCH 05/13] chore: Apply Clippy lint `collection_is_never_read` --- stackslib/src/burnchains/tests/burnchain.rs | 13 ------ stackslib/src/burnchains/tests/db.rs | 4 -- stackslib/src/chainstate/coordinator/tests.rs | 33 -------------- .../chainstate/nakamoto/coordinator/tests.rs | 13 +----- .../src/chainstate/nakamoto/tests/mod.rs | 2 - stackslib/src/chainstate/stacks/block.rs | 13 ------ .../src/chainstate/stacks/boot/pox_4_tests.rs | 4 ++ .../src/chainstate/stacks/db/transactions.rs | 3 +- .../src/chainstate/stacks/index/test/trie.rs | 5 --- .../stacks/tests/block_construction.rs | 45 ------------------- .../stacks/tests/chain_histories.rs | 14 ------ stackslib/src/chainstate/stacks/tests/mod.rs | 4 -- .../src/chainstate/stacks/transaction.rs | 3 -- stackslib/src/core/tests/mod.rs | 4 +- stackslib/src/net/atlas/tests.rs | 5 --- stackslib/src/net/tests/download/epoch2x.rs | 5 +-- stackslib/src/net/tests/mempool/mod.rs | 5 +-- stackslib/src/net/tests/mod.rs | 6 +-- stackslib/src/net/tests/relay/nakamoto.rs | 8 ---- stackslib/src/util_lib/strings.rs | 2 +- testnet/stacks-node/src/event_dispatcher.rs | 4 ++ testnet/stacks-node/src/tests/epoch_21.rs | 6 --- .../src/tests/nakamoto_integrations.rs | 7 ++- .../src/tests/neon_integrations.rs | 10 ----- 24 files changed, 21 insertions(+), 197 deletions(-) diff --git a/stackslib/src/burnchains/tests/burnchain.rs b/stackslib/src/burnchains/tests/burnchain.rs index 7f6be5bcf8..278d9b2910 100644 --- a/stackslib/src/burnchains/tests/burnchain.rs +++ b/stackslib/src/burnchains/tests/burnchain.rs @@ -698,19 +698,14 @@ fn test_burn_snapshot_sequence() { initial_reward_start_block: first_block_height, }; - let mut leader_private_keys = vec![]; let mut leader_public_keys = vec![]; let mut leader_bitcoin_public_keys = vec![]; - let mut leader_bitcoin_addresses = vec![]; for i in 0..32 { let mut csprng: ThreadRng = thread_rng(); let vrf_privkey = VRFPrivateKey(ed25519_dalek::SigningKey::generate(&mut csprng)); let vrf_pubkey = VRFPublicKey::from_private(&vrf_privkey); - let privkey_hex = vrf_privkey.to_hex(); - leader_private_keys.push(privkey_hex); - let pubkey_hex = vrf_pubkey.to_hex(); leader_public_keys.push(pubkey_hex); @@ -718,12 +713,6 @@ fn test_burn_snapshot_sequence() { let bitcoin_publickey = BitcoinPublicKey::from_private(&bitcoin_privkey); leader_bitcoin_public_keys.push(to_hex(&bitcoin_publickey.to_bytes())); - - leader_bitcoin_addresses.push(BitcoinAddress::from_bytes_legacy( - BitcoinNetworkType::Testnet, - LegacyBitcoinAddressType::PublicKeyHash, - &Hash160::from_data(&bitcoin_publickey.to_bytes()).0, - )); } let mut expected_burn_total: u64 = 0; // insert all the blocks let mut db = SortitionDB::connect_test(first_block_height, &first_burn_hash).unwrap(); let mut prev_snapshot = BlockSnapshot::initial(first_block_height, &first_burn_hash, first_block_height); - let mut all_stacks_block_hashes = vec![]; for i in 0..32 { let mut block_ops = vec![]; @@ -823,7 +811,6 @@ fn test_burn_snapshot_sequence() { burn_header_hash: burn_block_hash.clone(), }; - all_stacks_block_hashes.push(next_block_commit.block_header_hash.clone()); block_ops.push(BlockstackOperationType::LeaderBlockCommit( next_block_commit, )); diff --git a/stackslib/src/burnchains/tests/db.rs b/stackslib/src/burnchains/tests/db.rs index c8f568b5bf..834a062088 
100644 --- a/stackslib/src/burnchains/tests/db.rs +++ b/stackslib/src/burnchains/tests/db.rs @@ -919,8 +919,6 @@ fn test_update_block_descendancy_with_fork() { let mut cmts_genesis = vec![]; let mut cmts_invalid = vec![]; - let mut fork_parent = None; - let mut fork_parent_block_header: Option<BurnchainBlockHeader> = None; let mut fork_cmts = vec![]; for i in 0..5 { @@ -954,7 +952,6 @@ fn test_update_block_descendancy_with_fork() { }; fork_headers.push(block_header.clone()); - fork_parent_block_header = Some(block_header); } let mut am_id = 0; @@ -1018,7 +1015,6 @@ fn test_update_block_descendancy_with_fork() { fork_cmts.push(fork_cmt.clone()); parent = Some(cmt); - fork_parent = Some(fork_cmt); if i == 0 { am_id = { diff --git a/stackslib/src/chainstate/coordinator/tests.rs b/stackslib/src/chainstate/coordinator/tests.rs index 0863708122..a56d0c6f67 100644 --- a/stackslib/src/chainstate/coordinator/tests.rs +++ b/stackslib/src/chainstate/coordinator/tests.rs @@ -2268,7 +2268,6 @@ fn test_sortition_with_reward_set() { let mut started_first_reward_cycle = false; // process sequential blocks, and their sortitions... let mut stacks_blocks: Vec<(SortitionId, StacksBlock)> = vec![]; - let mut anchor_blocks = vec![]; // split up the vrf keys and committers so that we have some that will be mining "correctly" // and some that will be producing bad outputs @@ -2442,10 +2441,6 @@ fn test_sortition_with_reward_set() { let new_burnchain_tip = burnchain.get_canonical_chain_tip().unwrap(); if b.is_reward_cycle_start(new_burnchain_tip.block_height) { started_first_reward_cycle = true; - // store the anchor block for this sortition for later checking - let ic = sort_db.index_handle_at_tip(); - let bhh = ic.get_last_anchor_block_hash().unwrap().unwrap(); - anchor_blocks.push(bhh); } let tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap(); @@ -2540,7 +2535,6 @@ fn test_sortition_with_burner_reward_set() { let mut started_first_reward_cycle = false; // process sequential blocks, and their sortitions... let mut stacks_blocks: Vec<(SortitionId, StacksBlock)> = vec![]; - let mut anchor_blocks = vec![]; // split up the vrf keys and committers so that we have some that will be mining "correctly" // and some that will be producing bad outputs @@ -2688,10 +2682,6 @@ fn test_sortition_with_burner_reward_set() { let new_burnchain_tip = burnchain.get_canonical_chain_tip().unwrap(); if b.is_reward_cycle_start(new_burnchain_tip.block_height) { started_first_reward_cycle = true; - // store the anchor block for this sortition for later checking - let ic = sort_db.index_handle_at_tip(); - let bhh = ic.get_last_anchor_block_hash().unwrap().unwrap(); - anchor_blocks.push(bhh); } let tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap(); @@ -2804,7 +2794,6 @@ fn test_pox_btc_ops() { let mut started_first_reward_cycle = false; // process sequential blocks, and their sortitions... 
let mut stacks_blocks: Vec<(SortitionId, StacksBlock)> = vec![]; - let mut anchor_blocks = vec![]; // track the reward set consumption let mut reward_cycle_count = 0; @@ -2972,10 +2961,6 @@ fn test_pox_btc_ops() { if b.is_reward_cycle_start(new_burnchain_tip.block_height) { if new_burnchain_tip.block_height < sunset_ht { started_first_reward_cycle = true; - // store the anchor block for this sortition for later checking - let ic = sort_db.index_handle_at_tip(); - let bhh = ic.get_last_anchor_block_hash().unwrap().unwrap(); - anchor_blocks.push(bhh); } else { // store the anchor block for this sortition for later checking let ic = sort_db.index_handle_at_tip(); @@ -3096,7 +3081,6 @@ fn test_stx_transfer_btc_ops() { let mut started_first_reward_cycle = false; // process sequential blocks, and their sortitions... let mut stacks_blocks: Vec<(SortitionId, StacksBlock)> = vec![]; - let mut anchor_blocks = vec![]; // track the reward set consumption let mut reward_recipients = HashSet::new(); @@ -3319,10 +3303,6 @@ fn test_stx_transfer_btc_ops() { if b.is_reward_cycle_start(new_burnchain_tip.block_height) { if new_burnchain_tip.block_height < sunset_ht { started_first_reward_cycle = true; - // store the anchor block for this sortition for later checking - let ic = sort_db.index_handle_at_tip(); - let bhh = ic.get_last_anchor_block_hash().unwrap().unwrap(); - anchor_blocks.push(bhh); } else { // store the anchor block for this sortition for later checking let ic = sort_db.index_handle_at_tip(); @@ -5303,7 +5283,6 @@ fn test_sortition_with_sunset() { let mut started_first_reward_cycle = false; // process sequential blocks, and their sortitions... let mut stacks_blocks: Vec<(SortitionId, StacksBlock)> = vec![]; - let mut anchor_blocks = vec![]; // split up the vrf keys and committers so that we have some that will be mining "correctly" // and some that will be producing bad outputs @@ -5487,10 +5466,6 @@ fn test_sortition_with_sunset() { if b.is_reward_cycle_start(new_burnchain_tip.block_height) { if new_burnchain_tip.block_height < sunset_ht { started_first_reward_cycle = true; - // store the anchor block for this sortition for later checking - let ic = sort_db.index_handle_at_tip(); - let bhh = ic.get_last_anchor_block_hash().unwrap().unwrap(); - anchor_blocks.push(bhh); } else { // store the anchor block for this sortition for later checking let ic = sort_db.index_handle_at_tip(); @@ -5616,7 +5591,6 @@ fn test_sortition_with_sunset_and_epoch_switch() { let mut started_first_reward_cycle = false; // process sequential blocks, and their sortitions... let mut stacks_blocks: Vec<(SortitionId, StacksBlock)> = vec![]; - let mut anchor_blocks = vec![]; // split up the vrf keys and committers so that we have some that will be mining "correctly" // and some that will be producing bad outputs @@ -5828,10 +5802,6 @@ fn test_sortition_with_sunset_and_epoch_switch() { if b.is_reward_cycle_start(new_burnchain_tip.block_height) { if new_burnchain_tip.block_height < sunset_ht { started_first_reward_cycle = true; - // store the anchor block for this sortition for later checking - let ic = sort_db.index_handle_at_tip(); - let bhh = ic.get_last_anchor_block_hash().unwrap().unwrap(); - anchor_blocks.push(bhh); } else { // store the anchor block for this sortition for later checking let ic = sort_db.index_handle_at_tip(); @@ -6479,7 +6449,6 @@ fn test_pox_fork_out_of_order() { let mut sortition_ids_diverged = false; // process sequential blocks, and their sortitions... 
let mut stacks_blocks: Vec<(SortitionId, StacksBlock)> = vec![]; - let mut anchor_blocks = vec![]; // setup: // 2 forks: 0 - 1 - 2 - 3 - 4 - 5 - 11 - 12 - 13 - 14 - 15 @@ -6560,8 +6529,6 @@ fn test_pox_fork_out_of_order() { .unwrap() .block_height ); - - anchor_blocks.push(bhh); } let tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap(); diff --git a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs index e0b3375452..b8c93d427f 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs @@ -112,6 +112,8 @@ fn advance_to_nakamoto( let default_pox_addr = PoxAddress::from_legacy(AddressHashMode::SerializeP2PKH, addr.bytes.clone()); + // Stores the result of a function with side effects, so have Clippy ignore it + #[allow(clippy::collection_is_never_read)] let mut tip = None; for sortition_height in 0..11 { // stack to pox-3 in cycle 7 @@ -347,9 +349,6 @@ fn replay_reward_cycle( .step_by(reward_cycle_length) .collect(); - let mut indexes: Vec<_> = (0..stacks_blocks.len()).collect(); - indexes.shuffle(&mut thread_rng()); - for burn_ops in burn_ops.iter() { let (_, _, consensus_hash) = peer.next_burnchain_block(burn_ops.clone()); } @@ -845,7 +844,6 @@ fn block_descendant() { boot_plan.pox_constants = pox_constants; let mut peer = boot_plan.boot_into_nakamoto_peer(vec![], None); - let mut blocks = vec![]; let pox_constants = peer.sortdb().pox_constants.clone(); let first_burn_height = peer.sortdb().first_block_height; @@ -854,7 +852,6 @@ fn block_descendant() { loop { let (block, burn_height, ..) = peer.single_block_tenure(&private_key, |_| {}, |_| {}, |_| true); - blocks.push(block); if pox_constants.is_in_prepare_phase(first_burn_height, burn_height + 1) { info!("At prepare phase start"; "burn_height" => burn_height); @@ -3206,9 +3203,6 @@ fn test_stacks_on_burnchain_ops() { ); let mut all_blocks: Vec<NakamotoBlock> = vec![]; - let mut all_burn_ops = vec![]; - let mut consensus_hashes = vec![]; - let mut fee_counts = vec![]; let stx_miner_key = peer.miner.nakamoto_miner_key(); let mut extra_burn_ops = vec![]; @@ -3406,8 +3400,6 @@ fn test_stacks_on_burnchain_ops() { }) .sum::<u128>(); - consensus_hashes.push(consensus_hash); - fee_counts.push(fees); let mut blocks: Vec<NakamotoBlock> = blocks_and_sizes .into_iter() .map(|(block, _, _)| block) .collect(); @@ -3449,7 +3441,6 @@ fn test_stacks_on_burnchain_ops() { ); all_blocks.append(&mut blocks); - all_burn_ops.push(burn_ops); } // check receipts for burn ops diff --git a/stackslib/src/chainstate/nakamoto/tests/mod.rs b/stackslib/src/chainstate/nakamoto/tests/mod.rs index bd415b68b0..4d64a1e4f1 100644 --- a/stackslib/src/chainstate/nakamoto/tests/mod.rs +++ b/stackslib/src/chainstate/nakamoto/tests/mod.rs @@ -2529,8 +2529,6 @@ fn parse_vote_for_aggregate_public_key_invalid() { }; invalid_function_arg_reward_cycle.set_origin_nonce(1); - let mut account_nonces = std::collections::HashMap::new(); - account_nonces.insert(invalid_contract_name.origin_address(), 1); for (i, tx) in vec![ invalid_contract_address, invalid_contract_name, diff --git a/stackslib/src/chainstate/stacks/block.rs b/stackslib/src/chainstate/stacks/block.rs index 51c53c94de..fa08e0f06d 100644 --- a/stackslib/src/chainstate/stacks/block.rs +++ b/stackslib/src/chainstate/stacks/block.rs @@ -1146,19 +1146,6 @@ mod test { StacksEpochId::latest(), ); - // remove all coinbases - let mut txs_anchored = vec![]; - - for tx in all_txs.iter() { - match tx.payload {
TransactionPayload::Coinbase(..) => { - continue; - } - _ => {} - } - txs_anchored.push(tx); - } - // make microblocks with 3 transactions each (or fewer) for i in 0..(all_txs.len() / 3) { let txs = vec![ diff --git a/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs b/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs index 072f1d33ef..f6c9b7d012 100644 --- a/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs @@ -928,6 +928,8 @@ fn pox_lock_unlock() { assert_eq!(burnchain.pox_constants.reward_slots(), 6); let mut coinbase_nonce = 0; + // Stores the result of a function with side effects, so have Clippy ignore it + #[allow(clippy::collection_is_never_read)] let mut latest_block = None; // Advance into pox4 @@ -2685,6 +2687,8 @@ fn pox_4_delegate_stack_increase_events() { assert_eq!(burnchain.pox_constants.reward_slots(), 6); let mut coinbase_nonce = 0; + // Stores the result of a function with side effects, so have Clippy ignore it + #[allow(clippy::collection_is_never_read)] let mut latest_block = None; let alice_key = keys.pop().unwrap(); diff --git a/stackslib/src/chainstate/stacks/db/transactions.rs b/stackslib/src/chainstate/stacks/db/transactions.rs index e56624b84f..87e29e3f10 100644 --- a/stackslib/src/chainstate/stacks/db/transactions.rs +++ b/stackslib/src/chainstate/stacks/db/transactions.rs @@ -72,8 +72,9 @@ impl TryFrom<Value> for HashableClarityValue { impl std::hash::Hash for HashableClarityValue { fn hash<H: std::hash::Hasher>(&self, state: &mut H) { - #[allow(clippy::unwrap_used)] + #[allow(clippy::unwrap_used, clippy::collection_is_never_read)] // this unwrap is safe _as long as_ TryFrom was used as a constructor + // Also, this function has side effects, which cause Clippy to wrongly think `bytes` is unused let bytes = self.0.serialize_to_vec().unwrap(); bytes.hash(state); } diff --git a/stackslib/src/chainstate/stacks/index/test/trie.rs b/stackslib/src/chainstate/stacks/index/test/trie.rs index 8625527a16..9a130bf9d7 100644 --- a/stackslib/src/chainstate/stacks/index/test/trie.rs +++ b/stackslib/src/chainstate/stacks/index/test/trie.rs @@ -1245,8 +1245,6 @@ fn trie_cursor_splice_leaf_4() { let (nodes, node_ptrs, hashes) = make_node_path(&mut f, node_id.to_u8(), &path_segments, [31u8; 40].to_vec()); - let mut ptrs = vec![]; - // splice in a node in each path segment for k in 0..5 { let mut path = vec![ @@ -1274,7 +1272,6 @@ fn trie_cursor_splice_leaf_4() { &mut node, ) .unwrap(); - ptrs.push(new_ptr); Trie::update_root_hash(&mut f, &c).unwrap(); @@ -1338,7 +1335,6 @@ fn trie_cursor_splice_leaf_2() { let (nodes, node_ptrs, hashes) = make_node_path(&mut f, node_id.to_u8(), &path_segments, [31u8; 40].to_vec()); - let mut ptrs = vec![]; // splice in a node in each path segment for k in 0..10 { @@ -1363,7 +1359,6 @@ fn trie_cursor_splice_leaf_2() { &mut node, ) .unwrap(); - ptrs.push(new_ptr); Trie::update_root_hash(&mut f, &c).unwrap(); diff --git a/stackslib/src/chainstate/stacks/tests/block_construction.rs b/stackslib/src/chainstate/stacks/tests/block_construction.rs index bcf7611695..4b28f637a5 100644 --- a/stackslib/src/chainstate/stacks/tests/block_construction.rs +++ b/stackslib/src/chainstate/stacks/tests/block_construction.rs @@ -187,7 +187,6 @@ fn test_build_anchored_blocks_stx_transfers_single() { let recipient = StacksAddress::from_string(recipient_addr_str).unwrap(); let mut sender_nonce = 0; - let mut last_block = None; for tenure_id in 0..num_blocks { // send transactions to the mempool let tip = 
SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()) .unwrap(); @@ -271,8 +270,6 @@ fn test_build_anchored_blocks_stx_transfers_single() { }, ); - last_block = Some(stacks_block.clone()); - peer.next_burnchain_block(burn_ops.clone()); peer.process_stacks_epoch_at_tip(&stacks_block, &microblocks); @@ -324,7 +321,6 @@ fn test_build_anchored_blocks_empty_with_builder_timeout() { let recipient = StacksAddress::from_string(recipient_addr_str).unwrap(); let mut sender_nonce = 0; - let mut last_block = None; for tenure_id in 0..num_blocks { // send transactions to the mempool let tip = SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()) .unwrap(); @@ -412,8 +408,6 @@ fn test_build_anchored_blocks_empty_with_builder_timeout() { }, ); - last_block = Some(stacks_block.clone()); - peer.next_burnchain_block(burn_ops.clone()); peer.process_stacks_epoch_at_tip(&stacks_block, &microblocks); @@ -462,7 +456,6 @@ fn test_build_anchored_blocks_stx_transfers_multi() { let recipient = StacksAddress::from_string(recipient_addr_str).unwrap(); let mut sender_nonce = 0; - let mut last_block = None; for tenure_id in 0..num_blocks { // send transactions to the mempool let tip = SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()) .unwrap(); @@ -574,8 +567,6 @@ fn test_build_anchored_blocks_stx_transfers_multi() { }, ); - last_block = Some(stacks_block.clone()); - peer.next_burnchain_block(burn_ops.clone()); peer.process_stacks_epoch_at_tip(&stacks_block, &microblocks); @@ -1375,7 +1366,6 @@ fn test_build_anchored_blocks_skip_too_expensive() { let recipient = StacksAddress::from_string(recipient_addr_str).unwrap(); let mut sender_nonce = 0; - let mut last_block = None; for tenure_id in 0..num_blocks { // send transactions to the mempool let tip = SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()) .unwrap(); @@ -1515,8 +1505,6 @@ fn test_build_anchored_blocks_skip_too_expensive() { }, ); - last_block = Some(stacks_block.clone()); - peer.next_burnchain_block(burn_ops.clone()); peer.process_stacks_epoch_at_tip(&stacks_block, &microblocks); @@ -1794,7 +1782,6 @@ fn test_build_anchored_blocks_multiple_chaintips() { sn.block_height }; - let mut last_block = None; for tenure_id in 0..num_blocks { // send transactions to the mempool let tip = SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()) .unwrap(); @@ -1889,8 +1876,6 @@ fn test_build_anchored_blocks_multiple_chaintips() { }, ); - last_block = Some(stacks_block.clone()); - peer.next_burnchain_block(burn_ops.clone()); peer.process_stacks_epoch_at_tip(&stacks_block, &microblocks); @@ -1936,7 +1921,6 @@ fn test_build_anchored_blocks_empty_chaintips() { sn.block_height }; - let mut last_block = None; for tenure_id in 0..num_blocks { // send transactions to the mempool let tip = SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()) .unwrap(); @@ -2025,8 +2009,6 @@ fn test_build_anchored_blocks_empty_chaintips() { }, ); - last_block = Some(stacks_block.clone()); - peer.next_burnchain_block(burn_ops.clone()); peer.process_stacks_epoch_at_tip(&stacks_block, &microblocks); @@ -2079,7 +2061,6 @@ fn test_build_anchored_blocks_too_expensive_transactions() { sn.block_height }; - let mut last_block = None; for tenure_id in 0..num_blocks { // send transactions to the mempool let tip = SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()) .unwrap(); @@ -2203,8 +2184,6 @@ fn test_build_anchored_blocks_too_expensive_transactions() { }, ); - last_block = Some(stacks_block.clone()); - 
peer.next_burnchain_block(burn_ops.clone()); peer.process_stacks_epoch_at_tip(&stacks_block, &microblocks); @@ -2235,7 +2214,6 @@ fn test_build_anchored_blocks_invalid() { let mut last_block: Option<StacksBlock> = None; let mut last_valid_block: Option<StacksBlock> = None; - let mut last_tip: Option<BlockSnapshot> = None; let mut last_parent: Option<StacksBlock> = None; let mut last_parent_tip: Option<StacksHeaderInfo> = None; @@ -2267,8 +2245,6 @@ fn test_build_anchored_blocks_invalid() { tip = resume_tip.clone().unwrap(); } - last_tip = Some(tip.clone()); - let (mut burn_ops, stacks_block, microblocks) = peer.make_tenure(|ref mut miner, ref mut sortdb, ref mut chainstate, vrf_proof, ref parent_opt, ref parent_microblock_header_opt| { let parent_opt = if tenure_id != bad_block_tenure { @@ -2444,7 +2420,6 @@ fn test_build_anchored_blocks_bad_nonces() { sn.block_height }; - let mut last_block = None; for tenure_id in 0..num_blocks { eprintln!("Start tenure {:?}", tenure_id); // send transactions to the mempool let tip = SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()) .unwrap(); @@ -2640,8 +2615,6 @@ fn test_build_anchored_blocks_bad_nonces() { }, ); - last_block = Some(stacks_block.clone()); - peer.next_burnchain_block(burn_ops.clone()); peer.process_stacks_epoch_at_tip(&stacks_block, &microblocks); @@ -2699,7 +2672,6 @@ fn test_build_microblock_stream_forks() { let recipient_addr_str = "ST1RFD5Q2QPK3E0F08HG9XDX7SSC7CNRS0QR0SGEV"; let recipient = StacksAddress::from_string(recipient_addr_str).unwrap(); - let mut last_block = None; for tenure_id in 0..num_blocks { // send transactions to the mempool let tip = SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()) .unwrap(); @@ -2910,8 +2882,6 @@ fn test_build_microblock_stream_forks() { }, ); - last_block = Some(stacks_block.clone()); - peer.next_burnchain_block(burn_ops.clone()); peer.process_stacks_epoch_at_tip(&stacks_block, &microblocks); } @@ -3494,12 +3464,6 @@ fn test_contract_call_across_clarity_versions() { let num_blocks = 10; let mut anchored_sender_nonce = 0; - let mut mblock_privks = vec![]; - for _ in 0..num_blocks { - let mblock_privk = StacksPrivateKey::new(); - mblock_privks.push(mblock_privk); - } - let mut peer = TestPeer::new(peer_config); let chainstate_path = peer.chainstate_path.clone(); @@ -4566,7 +4530,6 @@ fn mempool_incorporate_pox_unlocks() { let recipient_addr_str = "ST1RFD5Q2QPK3E0F08HG9XDX7SSC7CNRS0QR0SGEV"; let recipient = StacksAddress::from_string(recipient_addr_str).unwrap(); - let mut last_block = None; for tenure_id in 0..num_blocks { // send transactions to the mempool let tip = SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()) .unwrap(); @@ -4719,11 +4682,6 @@ fn mempool_incorporate_pox_unlocks() { let (_, _, consensus_hash) = peer.next_burnchain_block(burn_ops.clone()); peer.process_stacks_epoch_at_tip(&stacks_block, &microblocks); - - last_block = Some(StacksBlockHeader::make_index_block_hash( - &consensus_hash, - &stacks_block.block_hash(), - )); } } @@ -4763,7 +4721,6 @@ fn test_fee_order_mismatch_nonce_order() { let recipient = StacksAddress::from_string(recipient_addr_str).unwrap(); let sender_nonce = 0; - let mut last_block = None; // send transactions to the mempool let tip = SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()).unwrap(); @@ -4852,8 +4809,6 @@ fn test_fee_order_mismatch_nonce_order() { }, ); - last_block = Some(stacks_block.clone()); - peer.next_burnchain_block(burn_ops.clone()); peer.process_stacks_epoch_at_tip(&stacks_block, &microblocks); diff --git a/stackslib/src/chainstate/stacks/tests/chain_histories.rs b/stackslib/src/chainstate/stacks/tests/chain_histories.rs index 
b8441a3cbb..763942c684 100644 --- a/stackslib/src/chainstate/stacks/tests/chain_histories.rs +++ b/stackslib/src/chainstate/stacks/tests/chain_histories.rs @@ -281,8 +281,6 @@ where ], ); - let mut sortition_winners = vec![]; - let first_snapshot = SortitionDB::get_first_block_snapshot(burn_node.sortdb.conn()).unwrap(); let mut fork = TestBurnchainFork::new( first_snapshot.block_height, @@ -415,8 +413,6 @@ where chain_tip.anchored_header.as_stacks_epoch2().unwrap(), )); - sortition_winners.push(miner_1.origin_address().unwrap()); - let mut next_miner_trace = TestMinerTracePoint::new(); next_miner_trace.add( miner_1.id, @@ -631,7 +627,6 @@ where &fork_snapshot.consensus_hash, &stacks_block_1.header )); - sortition_winners.push(miner_1.origin_address().unwrap()); next_miner_trace.add( miner_1.id, @@ -653,7 +648,6 @@ where &fork_snapshot.consensus_hash, &stacks_block_2.header )); - sortition_winners.push(miner_2.origin_address().unwrap()); next_miner_trace.add( miner_2.id, @@ -735,8 +729,6 @@ where ], ); - let mut sortition_winners = vec![]; - let first_snapshot = SortitionDB::get_first_block_snapshot(burn_node.sortdb.conn()).unwrap(); let mut fork = TestBurnchainFork::new( first_snapshot.block_height, @@ -960,7 +952,6 @@ where &fork_snapshot.consensus_hash, &stacks_block_1.header )); - sortition_winners.push(miner_1.origin_address().unwrap()); } else { test_debug!( "\n\nMiner 2 ({}) won sortition\n", @@ -973,7 +964,6 @@ where &fork_snapshot.consensus_hash, &stacks_block_2.header )); - sortition_winners.push(miner_2.origin_address().unwrap()); } // add both blocks to the miner trace, because in this test runner, there will be _two_ @@ -999,8 +989,6 @@ where test_debug!("\n\nMiner 1 and Miner 2 now separate\n\n"); - let mut sortition_winners_1 = sortition_winners.clone(); - let mut sortition_winners_2 = sortition_winners.clone(); let snapshot_at_fork = { let ic = burn_node.sortdb.index_conn(); let tip = fork.get_tip(&ic); @@ -1244,7 +1232,6 @@ where &fork_snapshot.consensus_hash, &stacks_block_1.header )); - sortition_winners_1.push(miner_1.origin_address().unwrap()); } else { test_debug!( "\n\nMiner 2 ({}) won sortition\n", @@ -1257,7 +1244,6 @@ where &fork_snapshot.consensus_hash, &stacks_block_2.header )); - sortition_winners_2.push(miner_2.origin_address().unwrap()); } // each miner produced a block; just one of them got accepted diff --git a/stackslib/src/chainstate/stacks/tests/mod.rs b/stackslib/src/chainstate/stacks/tests/mod.rs index 9f5dd9c860..358ab3bf71 100644 --- a/stackslib/src/chainstate/stacks/tests/mod.rs +++ b/stackslib/src/chainstate/stacks/tests/mod.rs @@ -839,7 +839,6 @@ pub fn check_mining_reward( block_height: u64, prev_block_rewards: &Vec<Vec<MinerPaymentSchedule>>, ) -> bool { - let mut block_rewards = HashMap::new(); let mut stream_rewards = HashMap::new(); let mut heights = HashMap::new(); let mut confirmed = HashSet::new(); @@ -849,9 +848,6 @@ pub fn check_mining_reward( &reward.consensus_hash, &reward.block_hash, ); - if reward.coinbase > 0 { - block_rewards.insert(ibh.clone(), reward.clone()); - } if let MinerPaymentTxFees::Epoch2 { streamed, .. 
} = &reward.tx_fees { if *streamed > 0 { stream_rewards.insert(ibh.clone(), reward.clone()); diff --git a/stackslib/src/chainstate/stacks/transaction.rs b/stackslib/src/chainstate/stacks/transaction.rs index d813dbcf01..765da5499d 100644 --- a/stackslib/src/chainstate/stacks/transaction.rs +++ b/stackslib/src/chainstate/stacks/transaction.rs @@ -3418,9 +3418,6 @@ mod test { let function_name = ClarityName::try_from("hello-function-name").unwrap(); let function_args = vec![Value::Int(0)]; - let mut contract_name_bytes = vec![contract_name.len() as u8]; - contract_name_bytes.extend_from_slice(contract_name.as_bytes()); - let mut contract_call_bytes = vec![]; address .consensus_serialize(&mut contract_call_bytes) diff --git a/stackslib/src/core/tests/mod.rs b/stackslib/src/core/tests/mod.rs index 4477c93b93..ed62de2b42 100644 --- a/stackslib/src/core/tests/mod.rs +++ b/stackslib/src/core/tests/mod.rs @@ -2692,7 +2692,6 @@ fn test_filter_txs_by_type() { version: 1, bytes: Hash160([0xff; 20]), }; - let mut txs = vec![]; let block_height = 10; let mut total_len = 0; @@ -2756,8 +2755,7 @@ fn test_filter_txs_by_type() { ) .unwrap(); - eprintln!("Added {} {}", i, &txid); - txs.push(tx); + eprintln!("Added {i} {txid}"); } mempool_tx.commit().unwrap(); diff --git a/stackslib/src/net/atlas/tests.rs b/stackslib/src/net/atlas/tests.rs index 8094c77799..d0ecd2fe22 100644 --- a/stackslib/src/net/atlas/tests.rs +++ b/stackslib/src/net/atlas/tests.rs @@ -685,20 +685,15 @@ fn test_downloader_context_attachment_requests() { let peer_url_3 = request_3.get_url().clone(); let request_4 = inventories_requests.pop().unwrap(); let peer_url_4 = request_4.get_url().clone(); - let mut responses = HashMap::new(); let response_1 = new_attachments_inventory_response(vec![(0, vec![1, 1, 1]), (1, vec![0, 0, 0])]); - responses.insert(peer_url_1.clone(), Some(response_1.clone())); let response_2 = new_attachments_inventory_response(vec![(0, vec![1, 1, 1]), (1, vec![0, 0, 0])]); - responses.insert(peer_url_2.clone(), Some(response_2.clone())); let response_3 = new_attachments_inventory_response(vec![(0, vec![0, 1, 1]), (1, vec![1, 0, 0])]); - responses.insert(peer_url_3.clone(), Some(response_3.clone())); - responses.insert(peer_url_4, None); inventories_results .succeeded diff --git a/stackslib/src/net/tests/download/epoch2x.rs b/stackslib/src/net/tests/download/epoch2x.rs index 9c995f1f32..fe193397ed 100644 --- a/stackslib/src/net/tests/download/epoch2x.rs +++ b/stackslib/src/net/tests/download/epoch2x.rs @@ -477,10 +477,9 @@ where info!("Completed walk round {} step(s)", round); - let mut peer_invs = vec![]; for peer in peers.iter_mut() { - let peer_inv = get_blocks_inventory(peer, 0, num_burn_blocks); - peer_invs.push(peer_inv); + // TODO: Remove if this function has no side effects + let _ = get_blocks_inventory(peer, 0, num_burn_blocks); let availability = get_peer_availability( peer, diff --git a/stackslib/src/net/tests/mempool/mod.rs b/stackslib/src/net/tests/mempool/mod.rs index 436e5a315a..0b3ca27913 100644 --- a/stackslib/src/net/tests/mempool/mod.rs +++ b/stackslib/src/net/tests/mempool/mod.rs @@ -765,7 +765,6 @@ fn test_mempool_sync_2_peers_problematic() { let stacks_tip_bhh = peer_1.network.stacks_tip.block_hash.clone(); // fill peer 1 with lots of transactions - let mut txs = HashMap::new(); let mut peer_1_mempool = peer_1.mempool.take().unwrap(); let mut mempool_tx = peer_1_mempool.tx_begin().unwrap(); for i in 0..num_txs { @@ -792,8 +791,6 @@ fn test_mempool_sync_2_peers_problematic() { let sponsor_nonce = 
tx.get_sponsor_nonce().unwrap_or(origin_nonce); let tx_fee = tx.get_tx_fee(); - txs.insert(tx.txid(), tx.clone()); - // should succeed MemPoolDB::try_add_tx( &mut mempool_tx, @@ -813,7 +810,7 @@ fn test_mempool_sync_2_peers_problematic() { ) .unwrap(); - eprintln!("Added {} {}", i, &txid); + eprintln!("Added {i} {txid}"); } mempool_tx.commit().unwrap(); peer_1.mempool = Some(peer_1_mempool); diff --git a/stackslib/src/net/tests/mod.rs b/stackslib/src/net/tests/mod.rs index 3a07ed006c..0329adc183 100644 --- a/stackslib/src/net/tests/mod.rs +++ b/stackslib/src/net/tests/mod.rs @@ -679,7 +679,6 @@ impl NakamotoBootPlan { let mut all_blocks = vec![]; let mut malleablized_block_ids = HashSet::new(); - let mut consensus_hashes = vec![]; let mut last_tenure_change: Option<TenureChangePayload> = None; let mut blocks_since_last_tenure = 0; @@ -761,7 +760,6 @@ impl NakamotoBootPlan { }); peer.refresh_burnchain_view(); - consensus_hashes.push(next_consensus_hash); let blocks: Vec<NakamotoBlock> = blocks_and_sizes .into_iter() .map(|(block, _, _)| block) .collect(); @@ -862,7 +860,6 @@ impl NakamotoBootPlan { }); peer.refresh_burnchain_view(); - consensus_hashes.push(consensus_hash); let blocks: Vec<NakamotoBlock> = blocks_and_sizes .into_iter() .map(|(block, _, _)| block) .collect(); @@ -958,14 +955,13 @@ impl NakamotoBootPlan { // each transaction was mined in the same order as described in the boot plan, // and it succeeded. - let mut burn_receipts = vec![]; let mut stacks_receipts = vec![]; for receipt in observed_block.receipts.iter() { match &receipt.transaction { TransactionOrigin::Stacks(..) => { stacks_receipts.push(receipt); } - TransactionOrigin::Burn(..) => burn_receipts.push(receipt), + TransactionOrigin::Burn(..) => {} } } diff --git a/stackslib/src/net/tests/relay/nakamoto.rs b/stackslib/src/net/tests/relay/nakamoto.rs index 606f1f3fb2..e26f1a3142 100644 --- a/stackslib/src/net/tests/relay/nakamoto.rs +++ b/stackslib/src/net/tests/relay/nakamoto.rs @@ -212,9 +212,6 @@ impl SeedNode { let test_stackers = peer.config.test_stackers.take().unwrap(); let mut all_blocks: Vec<NakamotoBlock> = vec![]; - let mut all_burn_ops = vec![]; - let mut rc_blocks = vec![]; - let mut rc_burn_ops = vec![]; // have the peer mine some blocks for two reward cycles for i in 0..(2 * rc_len) { @@ -330,15 +327,10 @@ impl SeedNode { .burnchain .is_reward_cycle_start(tip.block_height) { - rc_blocks.push(all_blocks.clone()); - rc_burn_ops.push(all_burn_ops.clone()); - - all_burn_ops.clear(); all_blocks.clear(); } all_blocks.append(&mut blocks); - all_burn_ops.push(burn_ops); } peer.config.test_signers = Some(test_signers); diff --git a/stackslib/src/util_lib/strings.rs b/stackslib/src/util_lib/strings.rs index d1fb48c86b..87daf94ace 100644 --- a/stackslib/src/util_lib/strings.rs +++ b/stackslib/src/util_lib/strings.rs @@ -353,7 +353,7 @@ mod test { let mut contract_bytes = vec![s.len() as u8]; contract_bytes.extend_from_slice(contract_str.as_bytes()); - check_codec_and_corruption::<ContractName>(&contract_str, &clarity_bytes); + check_codec_and_corruption::<ContractName>(&contract_str, &contract_bytes); } #[test] diff --git a/testnet/stacks-node/src/event_dispatcher.rs b/testnet/stacks-node/src/event_dispatcher.rs index da1668cdd2..18ee8c1797 100644 --- a/testnet/stacks-node/src/event_dispatcher.rs +++ b/testnet/stacks-node/src/event_dispatcher.rs @@ -2266,6 +2266,8 @@ mod test { let server = Server::http(format!("127.0.0.1:{port}")).unwrap(); thread::spawn(move || { let mut attempt = 0; + // This exists to only keep request from being dropped + #[allow(clippy::collection_is_never_read)] let mut _request_holder = None; while let Ok(request) = server.recv() { 
attempt += 1; @@ -2331,6 +2333,8 @@ mod test { let server = Server::http(format!("127.0.0.1:{port}")).unwrap(); thread::spawn(move || { let mut attempt = 0; + // This exists to only keep request from being dropped + #[allow(clippy::collection_is_never_read)] let mut _request_holder = None; while let Ok(mut request) = server.recv() { attempt += 1; diff --git a/testnet/stacks-node/src/tests/epoch_21.rs b/testnet/stacks-node/src/tests/epoch_21.rs index d50cac0117..eaa1e584d7 100644 --- a/testnet/stacks-node/src/tests/epoch_21.rs +++ b/testnet/stacks-node/src/tests/epoch_21.rs @@ -1045,7 +1045,6 @@ fn transition_adds_get_pox_addr_recipients() { ); let mut spender_sks = vec![]; - let mut spender_addrs = vec![]; let mut initial_balances = vec![]; let mut expected_pox_addrs = HashSet::new(); @@ -1056,7 +1055,6 @@ fn transition_adds_get_pox_addr_recipients() { let spender_addr: PrincipalData = to_addr(&spender_sk).into(); spender_sks.push(spender_sk); - spender_addrs.push(spender_addr.clone()); initial_balances.push(InitialBalance { address: spender_addr.clone(), amount: stacked + 100_000, @@ -1353,8 +1351,6 @@ fn transition_adds_mining_from_segwit() { u32::MAX, ); - let mut spender_sks = vec![]; - let mut spender_addrs = vec![]; let mut initial_balances = vec![]; let stacked = 100_000_000_000 * (core::MICROSTACKS_PER_STACKS as u64); @@ -1363,8 +1359,6 @@ fn transition_adds_mining_from_segwit() { let spender_sk = StacksPrivateKey::new(); let spender_addr: PrincipalData = to_addr(&spender_sk).into(); - spender_sks.push(spender_sk); - spender_addrs.push(spender_addr.clone()); initial_balances.push(InitialBalance { address: spender_addr.clone(), amount: stacked + 100_000, diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 1b84b9c0cd..9e4b07b119 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -2933,6 +2933,8 @@ fn block_proposal_api_endpoint() { let http_origin = format!("http://{}", &conf.node.rpc_bind); let path = format!("{http_origin}/v3/block_proposal"); + // Clippy thinks this is unused, but it seems to be holding a lock + #[allow(clippy::collection_is_never_read)] let mut hold_proposal_mutex = Some(test_observer::PROPOSAL_RESPONSES.lock().unwrap()); for (ix, (test_description, block_proposal, expected_http_code, _)) in test_cases.iter().enumerate() @@ -10395,7 +10397,6 @@ fn clarity_cost_spend_down() { .get_stacks_blocks_processed(); // Pause mining so we can add all our transactions to the mempool at once. 
TEST_MINE_STALL.lock().unwrap().replace(true); - let mut submitted_txs = vec![]; for _nmb_tx in 0..nmb_txs_per_signer { for sender_sk in sender_sks.iter() { let sender_nonce = get_and_increment_nonce(&sender_sk, &mut sender_nonces); @@ -10411,9 +10412,7 @@ fn clarity_cost_spend_down() { &[], ); match submit_tx_fallible(&http_origin, &contract_tx) { - Ok(txid) => { - submitted_txs.push(txid); - } + Ok(_txid) => {} Err(_e) => { // If we fail to submit a tx, we need to make sure we don't // increment the nonce for this sender, so we don't end up diff --git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs index a3ce78eb24..ed5fd9bbaa 100644 --- a/testnet/stacks-node/src/tests/neon_integrations.rs +++ b/testnet/stacks-node/src/tests/neon_integrations.rs @@ -8473,8 +8473,6 @@ fn atlas_stress_integration_test() { panic!(); } - let mut all_zonefiles = vec![]; - // make a _ton_ of name-imports for i in 0..batches { let account_before = get_account(&http_origin, &to_addr(&user_1)); @@ -8486,8 +8484,6 @@ fn atlas_stress_integration_test() { let zonefile_hex = format!("facade00{:04x}{:04x}{:04x}", batch_size * i + j, i, j); let hashed_zonefile = Hash160::from_data(&hex_bytes(&zonefile_hex).unwrap()); - all_zonefiles.push(zonefile_hex.clone()); - let tx_3 = make_contract_call( &user_1, 2 + (batch_size * i + j) as u64, @@ -8675,8 +8671,6 @@ fn atlas_stress_integration_test() { let zonefile_hex = format!("facade01{j:04x}"); let hashed_zonefile = Hash160::from_data(&hex_bytes(&zonefile_hex).unwrap()); - all_zonefiles.push(zonefile_hex.clone()); - let tx_6 = make_contract_call( &users[batches * batch_size + j], 1, @@ -8739,8 +8733,6 @@ fn atlas_stress_integration_test() { let zonefile_hex = format!("facade02{j:04x}"); let hashed_zonefile = Hash160::from_data(&hex_bytes(&zonefile_hex).unwrap()); - all_zonefiles.push(zonefile_hex.clone()); - let tx_7 = make_contract_call( &users[batches * batch_size + j], 2, @@ -8802,8 +8794,6 @@ fn atlas_stress_integration_test() { let zonefile_hex = format!("facade03{j:04x}"); let hashed_zonefile = Hash160::from_data(&hex_bytes(&zonefile_hex).unwrap()); - all_zonefiles.push(zonefile_hex.clone()); - let tx_8 = make_contract_call( &users[batches * batch_size + j], 3, From 9f1c4314ea02701c6b89b83795305519f1450cb4 Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Thu, 23 Jan 2025 09:38:46 -0500 Subject: [PATCH 06/13] chore: Apply PR comments from Brice --- stackslib/src/chainstate/nakamoto/coordinator/tests.rs | 5 +---- stackslib/src/net/tests/download/epoch2x.rs | 9 +++------ testnet/stacks-node/src/tests/nakamoto_integrations.rs | 4 +--- 3 files changed, 5 insertions(+), 13 deletions(-) diff --git a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs index b8c93d427f..eb6fde3b12 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs @@ -112,9 +112,6 @@ fn advance_to_nakamoto( let default_pox_addr = PoxAddress::from_legacy(AddressHashMode::SerializeP2PKH, addr.bytes.clone()); - // Stores the result of a function with side effects, so have Clippy ignore it - #[allow(clippy::collection_is_never_read)] - let mut tip = None; for sortition_height in 0..11 { // stack to pox-3 in cycle 7 let txs = if sortition_height == 6 { @@ -158,7 +155,7 @@ fn advance_to_nakamoto( vec![] }; - tip = Some(peer.tenure_with_txs(&txs, &mut peer_nonce)); + peer.tenure_with_txs(&txs, &mut peer_nonce); } // peer is at 
the start of cycle 8 } diff --git a/stackslib/src/net/tests/download/epoch2x.rs b/stackslib/src/net/tests/download/epoch2x.rs index ed255c5271..850be9cb5e 100644 --- a/stackslib/src/net/tests/download/epoch2x.rs +++ b/stackslib/src/net/tests/download/epoch2x.rs @@ -216,10 +216,10 @@ fn test_get_block_availability() { }) } -fn get_blocks_inventory(peer: &mut TestPeer, start_height: u64, end_height: u64) -> BlocksInvData { +fn get_blocks_inventory(peer: &TestPeer, start_height: u64, end_height: u64) -> BlocksInvData { let block_hashes = { let num_headers = end_height - start_height; - let ic = peer.sortdb.as_mut().unwrap().index_conn(); + let ic = peer.sortdb.as_ref().unwrap().index_conn(); let tip = SortitionDB::get_canonical_burn_chain_tip(&ic).unwrap(); let ancestor = SortitionDB::get_ancestor_snapshot(&ic, end_height, &tip.sortition_id) .unwrap() @@ -233,7 +233,7 @@ fn get_blocks_inventory(peer: &mut TestPeer, start_height: u64, end_height: u64) }; let inv = peer - .chainstate() + .chainstate_ref() .get_blocks_inventory(&block_hashes) .unwrap(); inv @@ -476,9 +476,6 @@ where info!("Completed walk round {} step(s)", round); for peer in peers.iter_mut() { - // TODO: Remove if this function has no side effects - let _ = get_blocks_inventory(peer, 0, num_burn_blocks); - let availability = get_peer_availability( peer, first_stacks_block_height - first_sortition_height, diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index fb305c72d2..b6c2eb372c 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -3025,8 +3025,6 @@ fn block_proposal_api_endpoint() { let http_origin = format!("http://{}", &conf.node.rpc_bind); let path = format!("{http_origin}/v3/block_proposal"); - // Clippy thinks this is unused, but it seems to be holding a lock - #[allow(clippy::collection_is_never_read)] let mut hold_proposal_mutex = Some(test_observer::PROPOSAL_RESPONSES.lock().unwrap()); for (ix, (test_description, block_proposal, expected_http_code, _)) in test_cases.iter().enumerate() @@ -3084,7 +3082,7 @@ fn block_proposal_api_endpoint() { if ix == 1 { // release the test observer mutex so that the handler from 0 can finish! 
- hold_proposal_mutex.take(); + _ = hold_proposal_mutex.take(); } } From e9c1ab8a1611e933f2d66c2f582cdcaed33960ee Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Mon, 27 Jan 2025 09:46:11 -0500 Subject: [PATCH 07/13] chore: Minor refactoring --- stackslib/src/chainstate/stacks/tests/mod.rs | 34 ++++++-------------- 1 file changed, 10 insertions(+), 24 deletions(-) diff --git a/stackslib/src/chainstate/stacks/tests/mod.rs b/stackslib/src/chainstate/stacks/tests/mod.rs index 85b43fb742..5e509ef0ae 100644 --- a/stackslib/src/chainstate/stacks/tests/mod.rs +++ b/stackslib/src/chainstate/stacks/tests/mod.rs @@ -967,22 +967,11 @@ pub fn get_last_microblock_header( miner: &TestMiner, parent_block_opt: Option<&StacksBlock>, ) -> Option { - let last_microblocks_opt = - parent_block_opt.and_then(|block| node.get_microblock_stream(miner, &block.block_hash())); - - let last_microblock_header_opt = match last_microblocks_opt { - Some(last_microblocks) => { - if last_microblocks.is_empty() { - None - } else { - let l = last_microblocks.len() - 1; - Some(last_microblocks[l].header.clone()) - } - } - None => None, - }; - - last_microblock_header_opt + parent_block_opt + .and_then(|block| node.get_microblock_stream(miner, &block.block_hash())) + .as_ref() + .and_then(|mblock_stream| mblock_stream.last()) + .map(|mblock| mblock.header.clone()) } pub fn get_all_mining_rewards( @@ -990,17 +979,14 @@ pub fn get_all_mining_rewards( tip: &StacksHeaderInfo, block_height: u64, ) -> Vec> { - let mut ret = vec![]; let mut tx = chainstate.index_tx_begin(); - for i in 0..block_height { - let block_rewards = + (0..block_height) + .map(|i| { StacksChainState::get_scheduled_block_rewards_in_fork_at_height(&mut tx, tip, i) - .unwrap(); - ret.push(block_rewards); - } - - ret + .unwrap() + }) + .collect() } pub fn make_coinbase(miner: &mut TestMiner, burnchain_height: usize) -> StacksTransaction { From 7699bc3ebb56c8f9100b738e7574c08c51195a85 Mon Sep 17 00:00:00 2001 From: Jeff Bencin Date: Mon, 27 Jan 2025 11:09:45 -0500 Subject: [PATCH 08/13] chore: Apply Clippy lint `manual_inspect` --- stackslib/src/burnchains/bitcoin/address.rs | 10 +-- stackslib/src/burnchains/bitcoin/bits.rs | 21 ++---- stackslib/src/burnchains/bitcoin/indexer.rs | 14 +--- stackslib/src/burnchains/bitcoin/spv.rs | 70 +++++------------- stackslib/src/burnchains/burnchain.rs | 33 ++++----- stackslib/src/chainstate/burn/db/sortdb.rs | 10 +-- .../burn/operations/leader_block_commit.rs | 10 +-- stackslib/src/chainstate/nakamoto/mod.rs | 69 ++++++++---------- stackslib/src/chainstate/nakamoto/shadow.rs | 5 +- stackslib/src/chainstate/stacks/db/blocks.rs | 17 ++--- stackslib/src/chainstate/stacks/index/marf.rs | 60 +++++---------- .../src/chainstate/stacks/index/storage.rs | 12 +-- stackslib/src/chainstate/stacks/index/trie.rs | 9 +-- stackslib/src/config/mod.rs | 3 +- stackslib/src/net/asn.rs | 20 ++--- stackslib/src/net/chat.rs | 32 +++----- .../nakamoto/download_state_machine.rs | 19 ++--- .../download/nakamoto/tenure_downloader.rs | 15 ++-- .../nakamoto/tenure_downloader_set.rs | 7 +- stackslib/src/net/httpcore.rs | 13 ++-- stackslib/src/net/inv/epoch2x.rs | 34 +++------ stackslib/src/net/inv/nakamoto.rs | 41 +++++------ stackslib/src/net/neighbors/comms.rs | 6 +- stackslib/src/net/neighbors/db.rs | 12 +-- stackslib/src/net/p2p.rs | 38 +++------- stackslib/src/net/relay.rs | 73 ++++++++++--------- 26 files changed, 245 insertions(+), 408 deletions(-) diff --git a/stackslib/src/burnchains/bitcoin/address.rs b/stackslib/src/burnchains/bitcoin/address.rs 
index 4cbc1ce80d..335177cd0c 100644 --- a/stackslib/src/burnchains/bitcoin/address.rs +++ b/stackslib/src/burnchains/bitcoin/address.rs @@ -302,9 +302,8 @@ impl SegwitBitcoinAddress { pub fn from_bech32(s: &str) -> Option { let (hrp, quintets, variant) = bech32::decode(s) - .map_err(|e| { - test_debug!("Failed to decode '{}': {:?}", s, &e); - e + .inspect_err(|_e| { + test_debug!("Failed to decode '{s}': {_e:?}"); }) .ok()?; @@ -327,9 +326,8 @@ impl SegwitBitcoinAddress { prog.append(&mut quintets[1..].to_vec()); let bytes = Vec::from_base32(&prog) - .map_err(|e| { - test_debug!("Failed to decode quintets: {:?}", &e); - e + .inspect_err(|_e| { + test_debug!("Failed to decode quintets: {_e:?}"); }) .ok()?; diff --git a/stackslib/src/burnchains/bitcoin/bits.rs b/stackslib/src/burnchains/bitcoin/bits.rs index 884f07a171..2a9745af25 100644 --- a/stackslib/src/burnchains/bitcoin/bits.rs +++ b/stackslib/src/burnchains/bitcoin/bits.rs @@ -112,22 +112,15 @@ impl BitcoinTxInputStructured { Instruction::PushBytes(payload) => payload, _ => { // not pushbytes, so this can't be a multisig script - test_debug!( - "Not a multisig script: Instruction {} is not a PushBytes", - i - ); + test_debug!("Not a multisig script: Instruction {i} is not a PushBytes"); return None; } }; let pubk = BitcoinPublicKey::from_slice(payload) - .map_err(|e| { + .inspect_err(|&e| { // not a public key - warn!( - "Not a multisig script: pushbytes {} is not a public key ({:?})", - i, e - ); - e + warn!("Not a multisig script: pushbytes {i} is not a public key ({e:?})"); }) .ok()?; @@ -169,13 +162,9 @@ impl BitcoinTxInputStructured { for i in 0..pubkey_vecs.len() { let payload = &pubkey_vecs[i]; let pubk = BitcoinPublicKey::from_slice(&payload[..]) - .map_err(|e| { + .inspect_err(|&e| { // not a public key - warn!( - "Not a multisig script: item {} is not a public key ({:?})", - i, e - ); - e + warn!("Not a multisig script: item {i} is not a public key ({e:?})"); }) .ok()?; diff --git a/stackslib/src/burnchains/bitcoin/indexer.rs b/stackslib/src/burnchains/bitcoin/indexer.rs index a9ccf8dfc5..c99e382769 100644 --- a/stackslib/src/burnchains/bitcoin/indexer.rs +++ b/stackslib/src/burnchains/bitcoin/indexer.rs @@ -627,12 +627,8 @@ impl BitcoinIndexer { )?; // what's the last header we have from the canonical history? - let canonical_end_block = orig_spv_client.get_headers_height().map_err(|e| { - error!( - "Failed to get the last block from {}", - canonical_headers_path - ); - e + let canonical_end_block = orig_spv_client.get_headers_height().inspect_err(|_e| { + error!("Failed to get the last block from {canonical_headers_path}"); })?; // bootstrap reorg client @@ -694,13 +690,12 @@ impl BitcoinIndexer { let reorg_headers = reorg_spv_client .read_block_headers(start_block, start_block + REORG_BATCH_SIZE) - .map_err(|e| { + .inspect_err(|_e| { error!( "Failed to read reorg Bitcoin headers from {} to {}", start_block, start_block + REORG_BATCH_SIZE ); - e })?; if reorg_headers.is_empty() { @@ -724,13 +719,12 @@ impl BitcoinIndexer { // got reorg headers. 
Find the equivalent headers in our canonical history let canonical_headers = orig_spv_client .read_block_headers(start_block, start_block + REORG_BATCH_SIZE) - .map_err(|e| { + .inspect_err(|_e| { error!( "Failed to read canonical headers from {} to {}", start_block, start_block + REORG_BATCH_SIZE ); - e })?; assert!( diff --git a/stackslib/src/burnchains/bitcoin/spv.rs b/stackslib/src/burnchains/bitcoin/spv.rs index fff8eaa06f..d12b261be9 100644 --- a/stackslib/src/burnchains/bitcoin/spv.rs +++ b/stackslib/src/burnchains/bitcoin/spv.rs @@ -832,10 +832,7 @@ impl SpvClient { // fetching headers in ascending order, so verify that the first item in // `block_headers` connects to a parent in the DB (if it has one) self.insert_block_headers_after(insert_height, block_headers) - .map_err(|e| { - error!("Failed to insert block headers: {:?}", &e); - e - })?; + .inspect_err(|e| error!("Failed to insert block headers: {e:?}"))?; // check work let chain_tip = self.get_headers_height()?; @@ -843,22 +840,15 @@ impl SpvClient { (insert_height.saturating_sub(1)) / BLOCK_DIFFICULTY_CHUNK_SIZE, chain_tip / BLOCK_DIFFICULTY_CHUNK_SIZE + 1, ) - .map_err(|e| { - error!( - "Received headers with bad target, difficulty, or continuity: {:?}", - &e - ); - e + .inspect_err(|e| { + error!("Received headers with bad target, difficulty, or continuity: {e:?}") })?; } else { // fetching headers in descending order, so verify that the last item in // `block_headers` connects to a child in the DB (if it has one) let headers_len = block_headers.len() as u64; self.insert_block_headers_before(insert_height, block_headers) - .map_err(|e| { - error!("Failed to insert block headers: {:?}", &e); - e - })?; + .inspect_err(|e| error!("Failed to insert block headers: {e:?}"))?; // check work let interval_start = if insert_height % BLOCK_DIFFICULTY_CHUNK_SIZE == 0 { @@ -870,12 +860,8 @@ impl SpvClient { let interval_end = (insert_height + 1 + headers_len) / BLOCK_DIFFICULTY_CHUNK_SIZE + 1; self.validate_header_work(interval_start, interval_end) - .map_err(|e| { - error!( - "Received headers with bad target, difficulty, or continuity: {:?}", - &e - ); - e + .inspect_err(|e| { + error!("Received headers with bad target, difficulty, or continuity: {e:?}") })?; } @@ -883,16 +869,12 @@ impl SpvClient { let total_work_after = self.update_chain_work()?; if total_work_after < total_work_before { error!( - "New headers represent less work than the old headers ({} < {})", - total_work_before, total_work_after + "New headers represent less work than the old headers ({total_work_before} < {total_work_after})" ); return Err(btc_error::InvalidChainWork); } - debug!( - "Handled {} Headers: {}-{}", - num_headers, first_header_hash, last_header_hash - ); + debug!("Handled {num_headers} Headers: {first_header_hash}-{last_header_hash}"); } else { debug!("Handled empty header reply"); } @@ -956,22 +938,16 @@ impl SpvClient { ); SpvClient::validate_header_integrity(start_height, &block_headers, self.check_txcount) - .map_err(|e| { - error!("Received invalid headers: {:?}", &e); - e - })?; - - let parent_header = match self.read_block_header(start_height)? { - Some(header) => header, - None => { - warn!( - "No header for block {} -- cannot insert {} headers into {}", - start_height, - block_headers.len(), - self.headers_path - ); - return Err(btc_error::NoncontiguousHeader); - } + .inspect_err(|e| error!("Received invalid headers: {e:?}"))?; + + let Some(parent_header) = self.read_block_header(start_height)? 
else { + warn!( + "No header for block {} -- cannot insert {} headers into {}", + start_height, + block_headers.len(), + self.headers_path + ); + return Err(btc_error::NoncontiguousHeader); }; // contiguous? @@ -1010,10 +986,7 @@ impl SpvClient { ); SpvClient::validate_header_integrity(start_height, &block_headers, self.check_txcount) - .map_err(|e| { - error!("Received invalid headers: {:?}", &e); - e - })?; + .inspect_err(|e| error!("Received invalid headers: {e:?}"))?; match self.read_block_header(end_height)? { Some(child_header) => { @@ -1028,10 +1001,7 @@ impl SpvClient { None => { // if we're inserting headers in reverse order, we're not guaranteed to have the // child. - debug!( - "No header for child block {}, so will not validate continuity", - end_height - ); + debug!("No header for child block {end_height}, so will not validate continuity"); } } diff --git a/stackslib/src/burnchains/burnchain.rs b/stackslib/src/burnchains/burnchain.rs index 18fb27e27e..8bc7289ec2 100644 --- a/stackslib/src/burnchains/burnchain.rs +++ b/stackslib/src/burnchains/burnchain.rs @@ -683,11 +683,12 @@ impl Burnchain { if headers_height == 0 || headers_height < self.first_block_height { debug!("Fetch initial headers"); - indexer.sync_headers(headers_height, None).map_err(|e| { - error!("Failed to sync initial headers"); - sleep_ms(100); - e - })?; + indexer + .sync_headers(headers_height, None) + .inspect_err(|_e| { + error!("Failed to sync initial headers"); + sleep_ms(100); + })?; } Ok(()) } @@ -1137,13 +1138,9 @@ impl Burnchain { let headers_path = indexer.get_headers_path(); // sanity check -- what is the height of our highest header - let headers_height = indexer.get_highest_header_height().map_err(|e| { - error!( - "Failed to read headers height from {}: {:?}", - headers_path, &e - ); - e - })?; + let headers_height = indexer + .get_highest_header_height() + .inspect_err(|e| error!("Failed to read headers height from {headers_path}: {e:?}"))?; if headers_height == 0 { return Ok((0, false)); @@ -1152,16 +1149,12 @@ impl Burnchain { // did we encounter a reorg since last sync? Find the highest common ancestor of the // remote bitcoin peer's chain state. // Note that this value is 0-indexed -- the smallest possible value it returns is 0. 
- let reorg_height = indexer.find_chain_reorg().map_err(|e| { - error!("Failed to check for reorgs from {}: {:?}", headers_path, &e); - e - })?; + let reorg_height = indexer + .find_chain_reorg() + .inspect_err(|e| error!("Failed to check for reorgs from {headers_path}: {e:?}"))?; if reorg_height < headers_height { - warn!( - "Burnchain reorg detected: highest common ancestor at height {}", - reorg_height - ); + warn!("Burnchain reorg detected: highest common ancestor at height {reorg_height}"); return Ok((reorg_height, true)); } else { // no reorg diff --git a/stackslib/src/chainstate/burn/db/sortdb.rs b/stackslib/src/chainstate/burn/db/sortdb.rs index 791ab19006..2fb6c1ca86 100644 --- a/stackslib/src/chainstate/burn/db/sortdb.rs +++ b/stackslib/src/chainstate/burn/db/sortdb.rs @@ -4896,16 +4896,12 @@ impl SortitionDB { let qry = "SELECT * FROM snapshots WHERE sortition_id = ?1"; let args = [&sortition_id]; query_row_panic(conn, qry, &args, || { - format!( - "FATAL: multiple block snapshots for the same block {}", - sortition_id - ) + format!("FATAL: multiple block snapshots for the same block {sortition_id}") }) - .map(|x| { + .inspect(|x| { if x.is_none() { - test_debug!("No snapshot with sortition ID {}", sortition_id); + test_debug!("No snapshot with sortition ID {sortition_id}"); } - x }) } diff --git a/stackslib/src/chainstate/burn/operations/leader_block_commit.rs b/stackslib/src/chainstate/burn/operations/leader_block_commit.rs index f996fd295a..64eff0a94c 100644 --- a/stackslib/src/chainstate/burn/operations/leader_block_commit.rs +++ b/stackslib/src/chainstate/burn/operations/leader_block_commit.rs @@ -1131,19 +1131,17 @@ impl LeaderBlockCommitOp { .is_after_pox_sunset_end(self.block_height, epoch.epoch_id) { // sunset has begun and we're not in epoch 2.1 or later, so apply sunset check - self.check_after_pox_sunset().map_err(|e| { - warn!("Invalid block-commit: bad PoX after sunset: {:?}", &e; + self.check_after_pox_sunset().inspect_err(|e| { + warn!("Invalid block-commit: bad PoX after sunset: {e:?}"; "apparent_sender" => %apparent_sender_repr); - e })?; vec![] } else { // either in epoch 2.1, or the PoX sunset hasn't completed yet self.check_pox(epoch.epoch_id, burnchain, tx, reward_set_info) - .map_err(|e| { - warn!("Invalid block-commit: bad PoX: {:?}", &e; + .inspect_err(|e| { + warn!("Invalid block-commit: bad PoX: {e:?}"; "apparent_sender" => %apparent_sender_repr); - e })? 
}; diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index 57bef8f749..5455e4e360 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -1710,29 +1710,26 @@ impl NakamotoChainState { block_id: &StacksBlockId, ) { loop { - let Ok(staging_block_tx) = stacks_chain_state.staging_db_tx_begin().map_err(|e| { - warn!("Failed to begin staging DB tx: {:?}", &e); - e - }) else { + let Ok(staging_block_tx) = stacks_chain_state + .staging_db_tx_begin() + .inspect_err(|e| warn!("Failed to begin staging DB tx: {e:?}")) + else { sleep_ms(1000); continue; }; - let Ok(_) = staging_block_tx.set_block_processed(block_id).map_err(|e| { - warn!("Failed to mark {} as processed: {:?}", block_id, &e); - e - }) else { + let Ok(_) = staging_block_tx + .set_block_processed(block_id) + .inspect_err(|e| warn!("Failed to mark {block_id} as processed: {e:?}")) + else { sleep_ms(1000); continue; }; - let Ok(_) = staging_block_tx.commit().map_err(|e| { - warn!( - "Failed to commit staging block tx for {}: {:?}", - block_id, &e - ); - e - }) else { + let Ok(_) = staging_block_tx + .commit() + .inspect_err(|e| warn!("Failed to commit staging block tx for {block_id}: {e:?}")) + else { sleep_ms(1000); continue; }; @@ -1748,29 +1745,26 @@ impl NakamotoChainState { block_id: &StacksBlockId, ) { loop { - let Ok(staging_block_tx) = stacks_chain_state.staging_db_tx_begin().map_err(|e| { - warn!("Failed to begin staging DB tx: {:?}", &e); - e - }) else { + let Ok(staging_block_tx) = stacks_chain_state + .staging_db_tx_begin() + .inspect_err(|e| warn!("Failed to begin staging DB tx: {e:?}")) + else { sleep_ms(1000); continue; }; - let Ok(_) = staging_block_tx.set_block_orphaned(block_id).map_err(|e| { - warn!("Failed to mark {} as orphaned: {:?}", &block_id, &e); - e - }) else { + let Ok(_) = staging_block_tx + .set_block_orphaned(block_id) + .inspect_err(|e| warn!("Failed to mark {block_id} as orphaned: {e:?}")) + else { sleep_ms(1000); continue; }; - let Ok(_) = staging_block_tx.commit().map_err(|e| { - warn!( - "Failed to commit staging block tx for {}: {:?}", - &block_id, &e - ); - e - }) else { + let Ok(_) = staging_block_tx + .commit() + .inspect_err(|e| warn!("Failed to commit staging block tx for {block_id}: {e:?}")) + else { sleep_ms(1000); continue; }; @@ -2352,12 +2346,11 @@ impl NakamotoChainState { let miner_pubkey_hash160 = leader_key .interpret_nakamoto_signing_key() .ok_or(ChainstateError::NoSuchBlockError) - .map_err(|e| { + .inspect_err(|_e| { warn!( "Leader key did not contain a hash160 of the miner signing public key"; "leader_key" => ?leader_key, ); - e })?; // attaches to burn chain @@ -2959,12 +2952,11 @@ impl NakamotoChainState { warn!("No VRF proof for {}", &parent_sn.consensus_hash); ChainstateError::NoSuchBlockError }) - .map_err(|e| { + .inspect_err(|_e| { warn!("Could not find parent VRF proof"; "tip_block_id" => %tip_block_id, "parent consensus_hash" => %parent_sn.consensus_hash, "block consensus_hash" => %consensus_hash); - e })?; Ok(parent_vrf_proof) @@ -3029,12 +3021,11 @@ impl NakamotoChainState { } let proof = VRFProof::from_hex(&bytes) .ok_or(DBError::Corruption) - .map_err(|e| { + .inspect_err(|_e| { warn!("Failed to load VRF proof: could not decode"; "vrf_proof" => %bytes, "tenure_start_block_id" => %tenure_start_block_id, ); - e })?; Ok(Some(proof)) } else { @@ -3087,25 +3078,23 @@ impl NakamotoChainState { let sn = SortitionDB::get_block_snapshot_consensus(sortdb_conn, 
&block.header.consensus_hash)? .ok_or(ChainstateError::NoSuchBlockError) - .map_err(|e| { + .inspect_err(|_e| { warn!("No block-commit for block"; "consensus_hash" => %block.header.consensus_hash, "stacks_block_hash" => %block.header.block_hash(), "stacks_block_id" => %block.header.block_id() ); - e })?; let block_commit = get_block_commit_by_txid(sortdb_conn, &sn.sortition_id, &sn.winning_block_txid)? .ok_or(ChainstateError::NoSuchBlockError) - .map_err(|e| { + .inspect_err(|_e| { warn!("No block-commit for block"; "consensus_hash" => %block.header.consensus_hash, "stacks_block_hash" => %block.header.block_hash(), "stacks_block_id" => %block.header.block_id() ); - e })?; // N.B. passing block.block_id() here means that we'll look into the parent tenure diff --git a/stackslib/src/chainstate/nakamoto/shadow.rs b/stackslib/src/chainstate/nakamoto/shadow.rs index 67a57a2ca0..46849e5fbb 100644 --- a/stackslib/src/chainstate/nakamoto/shadow.rs +++ b/stackslib/src/chainstate/nakamoto/shadow.rs @@ -347,14 +347,13 @@ impl NakamotoChainState { let vrf_proof = Self::get_block_vrf_proof(chainstate_conn, tip_block_id, &tenure_consensus_hash)? .ok_or_else(|| { - warn!("No VRF proof for {}", &tenure_consensus_hash); + warn!("No VRF proof for {tenure_consensus_hash}"); ChainstateError::NoSuchBlockError }) - .map_err(|e| { + .inspect_err(|_e| { warn!("Could not find shadow tenure VRF proof"; "tip_block_id" => %tip_block_id, "shadow consensus_hash" => %tenure_consensus_hash); - e })?; return Ok(Some(vrf_proof)); diff --git a/stackslib/src/chainstate/stacks/db/blocks.rs b/stackslib/src/chainstate/stacks/db/blocks.rs index 295f3708aa..4a5e2443e1 100644 --- a/stackslib/src/chainstate/stacks/db/blocks.rs +++ b/stackslib/src/chainstate/stacks/db/blocks.rs @@ -500,20 +500,19 @@ impl StacksChainState { .open(&path_tmp) .map_err(|e| { if e.kind() == io::ErrorKind::NotFound { - error!("File not found: {:?}", &path_tmp); + error!("File not found: {path_tmp:?}"); Error::DBError(db_error::NotFoundError) } else { - error!("Failed to open {:?}: {:?}", &path_tmp, &e); + error!("Failed to open {path_tmp:?}: {e:?}"); Error::DBError(db_error::IOError(e)) } })?; - writer(&mut fd).map_err(|e| { + writer(&mut fd).inspect_err(|_e| { if delete_on_error { // abort let _ = fs::remove_file(&path_tmp); } - e })?; fd.sync_all() @@ -3983,7 +3982,7 @@ impl StacksChainState { } for (consensus_hash, anchored_block_hash) in to_delete.into_iter() { - info!("Orphan {}/{}: it does not connect to a previously-accepted block, because its consensus hash does not match an existing snapshot on the valid PoX fork.", &consensus_hash, &anchored_block_hash); + info!("Orphan {consensus_hash}/{anchored_block_hash}: it does not connect to a previously-accepted block, because its consensus hash does not match an existing snapshot on the valid PoX fork."); let _ = StacksChainState::set_block_processed( blocks_tx, None, @@ -3992,12 +3991,8 @@ impl StacksChainState { &anchored_block_hash, false, ) - .map_err(|e| { - warn!( - "Failed to orphan {}/{}: {:?}", - &consensus_hash, &anchored_block_hash, &e - ); - e + .inspect_err(|e| { + warn!("Failed to orphan {consensus_hash}/{anchored_block_hash}: {e:?}") }); } diff --git a/stackslib/src/chainstate/stacks/index/marf.rs b/stackslib/src/chainstate/stacks/index/marf.rs index 368c21c204..7bec45bdfe 100644 --- a/stackslib/src/chainstate/stacks/index/marf.rs +++ b/stackslib/src/chainstate/stacks/index/marf.rs @@ -440,13 +440,12 @@ impl<'a, T: MarfTrieId> MarfTransaction<'a, T> { if new_extension { 
self.set_block_heights(chain_tip, next_chain_tip, block_height) - .map_err(|e| { + .inspect_err(|_e| { self.open_chain_tip.take(); - e })?; } - debug!("Opened {} to {}", chain_tip, next_chain_tip); + debug!("Opened {chain_tip} to {next_chain_tip}"); Ok(()) } @@ -932,9 +931,8 @@ impl MARF { let mut cursor = TrieCursor::new(path, storage.root_trieptr()); // walk to insertion point - let mut node = Trie::read_root_nohash(storage).map_err(|e| { - test_debug!("Failed to read root of {:?}: {:?}", block_hash, &e); - e + let mut node = Trie::read_root_nohash(storage).inspect_err(|_e| { + test_debug!("Failed to read root of {block_hash:?}: {_e:?}"); })?; for _ in 0..(cursor.path.len() + 1) { @@ -956,7 +954,7 @@ impl MARF { )); } - trace!("Cursor reached leaf {:?}", &node); + trace!("Cursor reached leaf {node:?}"); storage.bench_mut().marf_walk_from_finish(); return Ok((cursor, node)); } @@ -1035,24 +1033,16 @@ impl MARF { block_hash: &T, path: &TrieHash, ) -> Result, Error> { - trace!("MARF::get_path({:?}) {:?}", block_hash, path); + trace!("MARF::get_path({block_hash:?}) {path:?}"); // a NotFoundError _here_ means that a block didn't exist - storage.open_block(block_hash).map_err(|e| { - test_debug!("Failed to open block {:?}: {:?}", block_hash, &e); - e + storage.open_block(block_hash).inspect_err(|_e| { + test_debug!("Failed to open block {block_hash:?}: {_e:?}"); })?; // a NotFoundError _here_ means that the key doesn't exist in this view - let (cursor, node) = MARF::walk(storage, block_hash, path).map_err(|e| { - trace!( - "Failed to look up key {:?} {:?}: {:?}", - &block_hash, - path, - &e - ); - e - })?; + let (cursor, node) = MARF::walk(storage, block_hash, path) + .inspect_err(|e| trace!("Failed to look up key {block_hash:?} {path:?}: {e:?}"))?; // both of these get caught by get_by_key and turned into Ok(None) // and a lot of downstream code seems to depend on that behavior, but @@ -1177,13 +1167,9 @@ impl MARF { // restore storage .open_block_maybe_id(&cur_block_hash, cur_block_id) - .map_err(|e| { - warn!( - "Failed to re-open {} {:?}: {:?}", - &cur_block_hash, cur_block_id, &e - ); - warn!("Result of failed path lookup '{}': {:?}", path, &result); - e + .inspect_err(|e| { + warn!("Failed to re-open {cur_block_hash} {cur_block_id:?}: {e:?}"); + warn!("Result of failed path lookup '{path}': {result:?}"); })?; result.map(|option_result| option_result.map(|leaf| leaf.data)) @@ -1208,13 +1194,9 @@ impl MARF { // restore storage .open_block_maybe_id(&cur_block_hash, cur_block_id) - .map_err(|e| { - warn!( - "Failed to re-open {} {:?}: {:?}", - &cur_block_hash, cur_block_id, &e - ); - warn!("Result of failed key lookup '{}': {:?}", key, &result); - e + .inspect_err(|e| { + warn!("Failed to re-open {cur_block_hash} {cur_block_id:?}: {e:?}"); + warn!("Result of failed key lookup '{key}': {result:?}"); })?; result.map(|option_result| option_result.map(|leaf| leaf.data)) @@ -1237,13 +1219,9 @@ impl MARF { // restore storage .open_block_maybe_id(&cur_block_hash, cur_block_id) - .map_err(|e| { - warn!( - "Failed to re-open {} {:?}: {:?}", - &cur_block_hash, cur_block_id, &e - ); - warn!("Result of failed hash lookup '{}': {:?}", path, &result); - e + .inspect_err(|e| { + warn!("Failed to re-open {cur_block_hash} {cur_block_id:?}: {e:?}"); + warn!("Result of failed hash lookup '{path}': {result:?}"); })?; result.map(|option_result| option_result.map(|leaf| leaf.data)) diff --git a/stackslib/src/chainstate/stacks/index/storage.rs b/stackslib/src/chainstate/stacks/index/storage.rs index 
0eb60f25b4..db99f8004e 100644 --- a/stackslib/src/chainstate/stacks/index/storage.rs +++ b/stackslib/src/chainstate/stacks/index/storage.rs @@ -892,10 +892,8 @@ impl TrieRAM { let root_disk_ptr = BLOCK_HEADER_HASH_ENCODED_SIZE as u64 + 4; let root_ptr = TriePtr::new(TrieNodeID::Node256 as u8, 0, root_disk_ptr as u32); - let (mut root_node, root_hash) = read_nodetype(f, &root_ptr).map_err(|e| { - error!("Failed to read root node info for {:?}: {:?}", bhh, &e); - e - })?; + let (mut root_node, root_hash) = read_nodetype(f, &root_ptr) + .inspect_err(|e| error!("Failed to read root node info for {bhh:?}: {e:?}"))?; let mut next_index = 1; @@ -922,10 +920,8 @@ impl TrieRAM { let next_ptr = frontier .pop_front() .expect("BUG: no ptr in non-empty frontier"); - let (mut next_node, next_hash) = read_nodetype(f, &next_ptr).map_err(|e| { - error!("Failed to read node at {:?}: {:?}", &next_ptr, &e); - e - })?; + let (mut next_node, next_hash) = read_nodetype(f, &next_ptr) + .inspect_err(|e| error!("Failed to read node at {next_ptr:?}: {e:?}"))?; if !next_node.is_leaf() { // queue children in the same order we stored them diff --git a/stackslib/src/chainstate/stacks/index/trie.rs b/stackslib/src/chainstate/stacks/index/trie.rs index 0603c74c43..e701858fd1 100644 --- a/stackslib/src/chainstate/stacks/index/trie.rs +++ b/stackslib/src/chainstate/stacks/index/trie.rs @@ -217,22 +217,19 @@ impl Trie { // ptr is a backptr -- find the block let back_block_hash = storage .get_block_from_local_id(ptr.back_block()) - .map_err(|e| { + .inspect_err(|_e| { test_debug!("Failed to get block from local ID {}", ptr.back_block()); - e })? .clone(); storage .open_block_known_id(&back_block_hash, ptr.back_block()) - .map_err(|e| { + .inspect_err(|_e| { test_debug!( - "Failed to open block {} with id {}: {:?}", + "Failed to open block {} with id {}: {_e:?}", &back_block_hash, ptr.back_block(), - &e ); - e })?; let backptr = ptr.from_backptr(); diff --git a/stackslib/src/config/mod.rs b/stackslib/src/config/mod.rs index f4472d8fbc..ad1e2f6f1d 100644 --- a/stackslib/src/config/mod.rs +++ b/stackslib/src/config/mod.rs @@ -1579,9 +1579,8 @@ impl BurnchainConfigFile { .unwrap_or(default_burnchain_config.fault_injection_burnchain_block_delay), max_unspent_utxos: self .max_unspent_utxos - .map(|val| { + .inspect(|&val| { assert!(val <= 1024, "Value for max_unspent_utxos should be <= 1024"); - val }) .or(default_burnchain_config.max_unspent_utxos), }; diff --git a/stackslib/src/net/asn.rs b/stackslib/src/net/asn.rs index c28e82484b..fb1f66b481 100644 --- a/stackslib/src/net/asn.rs +++ b/stackslib/src/net/asn.rs @@ -122,9 +122,8 @@ impl ASEntry4 { .ok_or(net_error::DeserializeError( "Line does not match ANS4 regex".to_string(), )) - .map_err(|e| { - debug!("Failed to read line \"{}\"", &buf); - e + .inspect_err(|_e| { + debug!("Failed to read line \"{buf}\""); })?; let prefix_octets_str = caps @@ -132,9 +131,8 @@ impl ASEntry4 { .ok_or(net_error::DeserializeError( "Failed to read ANS4 prefix".to_string(), )) - .map_err(|e| { - debug!("Failed to get octets of \"{}\"", &buf); - e + .inspect_err(|_e| { + debug!("Failed to get octets of \"{buf}\""); })? .as_str(); @@ -143,9 +141,8 @@ impl ASEntry4 { .ok_or(net_error::DeserializeError( "Failed to read ASN4 prefix mask".to_string(), )) - .map_err(|e| { - debug!("Failed to get mask of \"{}\"", &buf); - e + .inspect_err(|_e| { + debug!("Failed to get mask of \"{buf}\""); })? 
.as_str(); @@ -154,9 +151,8 @@ impl ASEntry4 { .ok_or(net_error::DeserializeError( "Failed to read ASN ID".to_string(), )) - .map_err(|e| { - debug!("Failed to get ASN of \"{}\"", &buf); - e + .inspect_err(|_e| { + debug!("Failed to get ASN of \"{buf}\""); })? .as_str(); diff --git a/stackslib/src/net/chat.rs b/stackslib/src/net/chat.rs index 740d81b254..ecf533e21b 100644 --- a/stackslib/src/net/chat.rs +++ b/stackslib/src/net/chat.rs @@ -962,10 +962,9 @@ impl ConversationP2P { reply_message, request_preamble.seq, )?; - let reply_handle = self.relay_signed_message(reply).map_err(|e| { - debug!("Unable to reply a {}: {:?}", _msgtype, &e); - e - })?; + let reply_handle = self + .relay_signed_message(reply) + .inspect_err(|e| debug!("Unable to reply a {_msgtype}: {e:?}"))?; Ok(reply_handle) } @@ -981,10 +980,9 @@ impl ConversationP2P { let _msgtype = forward_message.get_message_name().to_owned(); let fwd = self.sign_relay_message(local_peer, burnchain_view, relay_hints, forward_message)?; - let fwd_handle = self.relay_signed_message(fwd).map_err(|e| { - debug!("Unable to forward a {}: {:?}", _msgtype, &e); - e - })?; + let fwd_handle = self + .relay_signed_message(fwd) + .inspect_err(|e| debug!("Unable to forward a {_msgtype}: {e:?}"))?; Ok(fwd_handle) } @@ -1475,13 +1473,9 @@ impl ConversationP2P { neighbors: neighbor_addrs, }); let reply = self.sign_reply(chain_view, &local_peer.private_key, payload, preamble.seq)?; - let reply_handle = self.relay_signed_message(reply).map_err(|e| { - debug!( - "Outbox to {:?} is full; cannot reply to GetNeighbors", - &self - ); - e - })?; + let reply_handle = self + .relay_signed_message(reply) + .inspect_err(|_e| debug!("Outbox to {self:?} is full; cannot reply to GetNeighbors"))?; Ok(reply_handle) } @@ -1747,12 +1741,8 @@ impl ConversationP2P { &network.stacks_tip.block_hash, reward_cycle, )?; - let nakamoto_inv = NakamotoInvData::try_from(&bitvec_bools).map_err(|e| { - warn!( - "Failed to create a NakamotoInv response to {:?}: {:?}", - get_nakamoto_inv, &e - ); - e + let nakamoto_inv = NakamotoInvData::try_from(&bitvec_bools).inspect_err(|e| { + warn!("Failed to create a NakamotoInv response to {get_nakamoto_inv:?}: {e:?}") })?; debug!( diff --git a/stackslib/src/net/download/nakamoto/download_state_machine.rs b/stackslib/src/net/download/nakamoto/download_state_machine.rs index 36443b46e4..b5a6af1153 100644 --- a/stackslib/src/net/download/nakamoto/download_state_machine.rs +++ b/stackslib/src/net/download/nakamoto/download_state_machine.rs @@ -1186,12 +1186,11 @@ impl NakamotoDownloadStateMachine { let _ = downloader .try_advance_from_chainstate(chainstate) - .map_err(|e| { + .inspect_err(|e| { warn!( - "Failed to advance downloader in state {} for {}: {:?}", - &downloader.state, &downloader.naddr, &e - ); - e + "Failed to advance downloader in state {} for {}: {e:?}", + &downloader.state, &downloader.naddr + ) }); debug!( @@ -1257,13 +1256,11 @@ impl NakamotoDownloadStateMachine { { if let Some(highest_complete_tenure_downloader) = downloader .make_highest_complete_tenure_downloader() - .map_err(|e| { + .inspect_err(|e| { warn!( - "Failed to make highest complete tenure downloader for {:?}: {:?}", - &downloader.unconfirmed_tenure_id(), - &e - ); - e + "Failed to make highest complete tenure downloader for {:?}: {e:?}", + &downloader.unconfirmed_tenure_id() + ) }) .ok() { diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader.rs b/stackslib/src/net/download/nakamoto/tenure_downloader.rs index 1d4d680c43..6e98703956 100644 --- 
a/stackslib/src/net/download/nakamoto/tenure_downloader.rs +++ b/stackslib/src/net/download/nakamoto/tenure_downloader.rs @@ -781,9 +781,8 @@ impl NakamotoTenureDownloader { &block_id, get_epoch_time_ms().saturating_sub(start_request_time) ); - let block = response.decode_nakamoto_block().map_err(|e| { - warn!("Failed to decode response for a Nakamoto block: {:?}", &e); - e + let block = response.decode_nakamoto_block().inspect_err(|e| { + warn!("Failed to decode response for a Nakamoto block: {e:?}") })?; self.try_accept_tenure_start_block(block)?; Ok(None) @@ -794,9 +793,8 @@ impl NakamotoTenureDownloader { &block_id, get_epoch_time_ms().saturating_sub(start_request_time) ); - let block = response.decode_nakamoto_block().map_err(|e| { - warn!("Failed to decode response for a Nakamoto block: {:?}", &e); - e + let block = response.decode_nakamoto_block().inspect_err(|e| { + warn!("Failed to decode response for a Nakamoto block: {e:?}") })?; self.try_accept_tenure_end_block(&block)?; Ok(None) @@ -807,9 +805,8 @@ impl NakamotoTenureDownloader { &end_block_id, get_epoch_time_ms().saturating_sub(start_request_time) ); - let blocks = response.decode_nakamoto_tenure().map_err(|e| { - warn!("Failed to decode response for a Nakamoto tenure: {:?}", &e); - e + let blocks = response.decode_nakamoto_tenure().inspect_err(|e| { + warn!("Failed to decode response for a Nakamoto tenure: {e:?}") })?; let blocks_opt = self.try_accept_tenure_blocks(blocks)?; Ok(blocks_opt) diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs b/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs index f8054828b6..d73342164e 100644 --- a/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs +++ b/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs @@ -571,12 +571,11 @@ impl NakamotoTenureDownloaderSet { let _ = downloader .try_advance_from_chainstate(chainstate) - .map_err(|e| { + .inspect_err(|e| { warn!( - "Failed to advance downloader in state {} for {}: {:?}", - &downloader.state, &downloader.naddr, &e + "Failed to advance downloader in state {} for {}: {e:?}", + &downloader.state, &downloader.naddr ); - e }); debug!( diff --git a/stackslib/src/net/httpcore.rs b/stackslib/src/net/httpcore.rs index 00fa0948bd..2a47be3547 100644 --- a/stackslib/src/net/httpcore.rs +++ b/stackslib/src/net/httpcore.rs @@ -1275,9 +1275,8 @@ impl StacksHttp { return Err(NetError::InvalidState); } if let Some(reply) = self.reply.as_mut() { - match reply.stream.consume_data(fd).map_err(|e| { + match reply.stream.consume_data(fd).inspect_err(|_e| { self.reset(); - e })? { (Some((byte_vec, bytes_total)), sz) => { // done receiving @@ -1491,11 +1490,11 @@ impl ProtocolFamily for StacksHttp { } // message of unknown length. Buffer up and maybe we can parse it. 
- let (message_bytes_opt, num_read) = - self.consume_data(http_response_preamble, fd).map_err(|e| { - self.reset(); - e - })?; + let (message_bytes_opt, num_read) = self + .consume_data(http_response_preamble, fd) + .inspect_err(|_e| { + self.reset(); + })?; match message_bytes_opt { Some((message_bytes, total_bytes_consumed)) => { diff --git a/stackslib/src/net/inv/epoch2x.rs b/stackslib/src/net/inv/epoch2x.rs index 322b1b826c..99253816f5 100644 --- a/stackslib/src/net/inv/epoch2x.rs +++ b/stackslib/src/net/inv/epoch2x.rs @@ -1847,10 +1847,7 @@ impl PeerNetwork { let message = self.sign_for_neighbor(nk, payload)?; let request = self .send_neighbor_message(nk, message, request_timeout) - .map_err(|e| { - debug!("Failed to send GetPoxInv to {:?}: {:?}", &nk, &e); - e - })?; + .inspect_err(|e| debug!("Failed to send GetPoxInv to {nk:?}: {e:?}"))?; stats.getpoxinv_begin(request, target_pox_reward_cycle); if let Some(event_id) = event_id_opt { @@ -2040,10 +2037,7 @@ impl PeerNetwork { let message = self.sign_for_neighbor(nk, payload)?; let request = self .send_neighbor_message(nk, message, request_timeout) - .map_err(|e| { - debug!("Failed to send GetPoxInv to {:?}: {:?}", &nk, &e); - e - })?; + .inspect_err(|e| debug!("Failed to send GetPoxInv to {nk:?}: {e:?}"))?; stats.getblocksinv_begin(request, target_block_reward_cycle, num_blocks_expected); if let Some(event_id) = event_id_opt { @@ -2605,18 +2599,13 @@ impl PeerNetwork { // if this succeeds, then we should be able to make a BlocksInv let ancestor_sn = self .get_ancestor_sortition_snapshot(sortdb, target_block_height) - .map_err(|e| { - debug!( - "Failed to load ancestor sortition snapshot at height {}: {:?}", - target_block_height, &e - ); - e + .inspect_err(|e| { + debug!( "Failed to load ancestor sortition snapshot at height {target_block_height}: {e:?}") })?; - let tip_sn = self.get_tip_sortition_snapshot(sortdb).map_err(|e| { - debug!("Failed to load tip sortition snapshot: {:?}", &e); - e - })?; + let tip_sn = self + .get_tip_sortition_snapshot(sortdb) + .inspect_err(|e| debug!("Failed to load tip sortition snapshot: {e:?}"))?; let getblocksinv = GetBlocksInv { consensus_hash: ancestor_sn.consensus_hash, @@ -2634,12 +2623,11 @@ impl PeerNetwork { let blocks_inv = ConversationP2P::make_getblocksinv_response(self, sortdb, chainstate, &getblocksinv) - .map_err(|e| { + .inspect_err(|e| { debug!( - "Failed to load blocks inventory at reward cycle {} ({:?}): {:?}", - reward_cycle, &ancestor_sn.consensus_hash, &e - ); - e + "Failed to load blocks inventory at reward cycle {reward_cycle} ({:?}): {e:?}", + &ancestor_sn.consensus_hash + ); })?; match blocks_inv { diff --git a/stackslib/src/net/inv/nakamoto.rs b/stackslib/src/net/inv/nakamoto.rs index 74cc8f0d0e..9bebbaf642 100644 --- a/stackslib/src/net/inv/nakamoto.rs +++ b/stackslib/src/net/inv/nakamoto.rs @@ -982,24 +982,22 @@ impl NakamotoInvStateMachine { ); let Some(inv) = self.inventories.get_mut(&naddr) else { debug!( - "{:?}: Got a reply for an untracked inventory peer {}: {:?}", + "{:?}: Got a reply for an untracked inventory peer {naddr}: {reply:?}", network.get_local_peer(), - &naddr, - &reply ); continue; }; - let Ok(inv_learned) = inv.getnakamotoinv_try_finish(network, reply).map_err(|e| { - warn!( - "{:?}: Failed to finish inventory sync to {}: {:?}", - network.get_local_peer(), - &naddr, - &e - ); - self.comms.add_broken(network, &naddr); - e - }) else { + let Ok(inv_learned) = inv + .getnakamotoinv_try_finish(network, reply) + .inspect_err(|e| { + warn!( + "{:?}: Failed to 
finish inventory sync to {naddr}: {e:?}", + network.get_local_peer() + ); + self.comms.add_broken(network, &naddr); + }) + else { continue; }; @@ -1051,14 +1049,15 @@ impl NakamotoInvStateMachine { &e ); } - let Ok((_, learned)) = self.process_getnakamotoinv_finishes(network).map_err(|e| { - warn!( - "{:?}: Failed to finish Nakamoto tenure inventory sync: {:?}", - network.get_local_peer(), - &e - ); - e - }) else { + let Ok((_, learned)) = self + .process_getnakamotoinv_finishes(network) + .inspect_err(|e| { + warn!( + "{:?}: Failed to finish Nakamoto tenure inventory sync: {e:?}", + network.get_local_peer(), + ) + }) + else { self.last_sort_tip = Some(network.burnchain_tip.clone()); return false; }; diff --git a/stackslib/src/net/neighbors/comms.rs b/stackslib/src/net/neighbors/comms.rs index 821952af33..48759c913d 100644 --- a/stackslib/src/net/neighbors/comms.rs +++ b/stackslib/src/net/neighbors/comms.rs @@ -106,14 +106,12 @@ pub trait NeighborComms { let msg = network .sign_for_neighbor(&nk, StacksMessageType::Handshake(handshake_data)) - .map_err(|e| { + .inspect_err(|_e| { info!( - "{:?}: Failed to sign for peer {:?}", + "{:?}: Failed to sign for peer {nk:?}", network.get_local_peer(), - &nk ); self.add_dead(network, &nk); - e })?; network diff --git a/stackslib/src/net/neighbors/db.rs b/stackslib/src/net/neighbors/db.rs index a9d2268fca..3b1d99e906 100644 --- a/stackslib/src/net/neighbors/db.rs +++ b/stackslib/src/net/neighbors/db.rs @@ -223,26 +223,22 @@ pub trait NeighborWalkDB { // favor neighbors with older last-contact times let next_neighbors_res = self .get_fresh_random_neighbors(network, (NUM_NEIGHBORS as u64) * 2) - .map_err(|e| { + .inspect_err(|e| { debug!( - "{:?}: Failed to load fresh initial walk neighbors: {:?}", + "{:?}: Failed to load fresh initial walk neighbors: {e:?}", network.get_local_peer(), - &e ); - e }); let db_neighbors = if let Ok(neighbors) = next_neighbors_res { neighbors } else { let any_neighbors = Self::pick_walk_neighbors(network, (NUM_NEIGHBORS as u64) * 2, 0) - .map_err(|e| { + .inspect_err(|e| { info!( - "{:?}: Failed to load any initial walk neighbors: {:?}", + "{:?}: Failed to load any initial walk neighbors: {e:?}", network.get_local_peer(), - &e ); - e })?; any_neighbors diff --git a/stackslib/src/net/p2p.rs b/stackslib/src/net/p2p.rs index 54c0428548..40591c6ddc 100644 --- a/stackslib/src/net/p2p.rs +++ b/stackslib/src/net/p2p.rs @@ -2692,22 +2692,16 @@ impl PeerNetwork { &self.local_peer.private_key, StacksMessageType::NatPunchRequest(nonce), ) - .map_err(|e| { - info!("Failed to sign NAT punch request: {:?}", &e); - e - })?; + .inspect_err(|e| info!("Failed to sign NAT punch request: {e:?}"))?; let mut rh = convo .send_signed_request(natpunch_request, self.connection_opts.timeout) - .map_err(|e| { - info!("Failed to send NAT punch request: {:?}", &e); - e - })?; + .inspect_err(|e| info!("Failed to send NAT punch request: {e:?}"))?; - self.saturate_p2p_socket(event_id, &mut rh).map_err(|e| { - info!("Failed to saturate NAT punch socket on event {}", &event_id); - e - })?; + self.saturate_p2p_socket(event_id, &mut rh) + .inspect_err(|_e| { + info!("Failed to saturate NAT punch socket on event {event_id}") + })?; self.public_ip_reply_handle = Some(rh); break; @@ -3669,15 +3663,13 @@ impl PeerNetwork { // always do block download let new_blocks = self .do_network_block_sync_nakamoto(burnchain_height, sortdb, chainstate, ibd) - .map_err(|e| { + .inspect_err(|e| { warn!( - "{:?}: Failed to perform Nakamoto block sync: {:?}", - 
&self.get_local_peer(), - &e - ); - e + "{:?}: Failed to perform Nakamoto block sync: {e:?}", + &self.get_local_peer() + ) }) - .unwrap_or(HashMap::new()); + .unwrap_or_default(); network_result.consume_nakamoto_blocks(new_blocks); @@ -4407,13 +4399,7 @@ impl PeerNetwork { sortdb, &OnChainRewardSetProvider::new(), ) - .map_err(|e| { - warn!( - "Failed to load reward cycle info for cycle {}: {:?}", - rc, &e - ); - e - }) + .inspect_err(|e| warn!("Failed to load reward cycle info for cycle {rc}: {e:?}")) .unwrap_or(None) else { continue; }; diff --git a/stackslib/src/net/relay.rs b/stackslib/src/net/relay.rs index 26c0fed831..6b1995ab65 100644 --- a/stackslib/src/net/relay.rs +++ b/stackslib/src/net/relay.rs @@ -949,14 +949,12 @@ impl Relayer { if chainstate .nakamoto_blocks_db() .has_nakamoto_block_with_index_hash(&block.header.block_id()) - .map_err(|e| { + .inspect_err(|e| { warn!( - "Failed to determine if we have Nakamoto block {}/{}: {:?}", + "Failed to determine if we have Nakamoto block {}/{}: {e:?}", &block.header.consensus_hash, - &block.header.block_hash(), - &e + &block.header.block_hash() ); - e })? { if force_broadcast { @@ -3135,21 +3133,22 @@ impl PeerNetwork { Ok(m) => m, Err(e) => { warn!( - "{:?}: Failed to sign for {:?}: {:?}", - &self.local_peer, recipient, &e + "{:?}: Failed to sign for {recipient:?}: {e:?}", + &self.local_peer ); continue; } }; // absorb errors - let _ = self.relay_signed_message(recipient, message).map_err(|e| { - warn!( - "{:?}: Failed to announce {} entries to {:?}: {:?}", - &self.local_peer, num_blocks, recipient, &e - ); - e - }); + let _ = self + .relay_signed_message(recipient, message) + .inspect_err(|e| { + warn!( + "{:?}: Failed to announce {num_blocks} entries to {recipient:?}: {e:?}", + &self.local_peer + ); + }); } } @@ -3170,26 +3169,27 @@ impl PeerNetwork { Ok(m) => m, Err(e) => { warn!( - "{:?}: Failed to sign for {:?}: {:?}", - &self.local_peer, recipient, &e + "{:?}: Failed to sign for {recipient:?}: {e:?}", + &self.local_peer ); return; } }; debug!( - "{:?}: Push block {}/{} to {:?}", - &self.local_peer, &ch, &blk_hash, recipient + "{:?}: Push block {ch}/{blk_hash} to {recipient:?}", + &self.local_peer ); // absorb errors - let _ = self.relay_signed_message(recipient, message).map_err(|e| { - warn!( - "{:?}: Failed to push block {}/{} to {:?}: {:?}", - &self.local_peer, &ch, &blk_hash, recipient, &e - ); - e - }); + let _ = self + .relay_signed_message(recipient, message) + .inspect_err(|e| { + warn!( + "{:?}: Failed to push block {ch}/{blk_hash} to {recipient:?}: {e:?}", + &self.local_peer + ) + }); } /// Try to push a confirmed microblock stream to a peer. 
@@ -3210,26 +3210,27 @@ impl PeerNetwork { Ok(m) => m, Err(e) => { warn!( - "{:?}: Failed to sign for {:?}: {:?}", - &self.local_peer, recipient, &e + "{:?}: Failed to sign for {recipient:?}: {e:?}", + &self.local_peer ); return; } }; debug!( - "{:?}: Push microblocks for {} to {:?}", - &self.local_peer, &idx_bhh, recipient + "{:?}: Push microblocks for {idx_bhh} to {recipient:?}", + &self.local_peer ); // absorb errors - let _ = self.relay_signed_message(recipient, message).map_err(|e| { - warn!( - "{:?}: Failed to push microblocks for {} to {:?}: {:?}", - &self.local_peer, &idx_bhh, recipient, &e - ); - e - }); + let _ = self + .relay_signed_message(recipient, message) + .inspect_err(|e| { + warn!( + "{:?}: Failed to push microblocks for {idx_bhh} to {recipient:?}: {e:?}", + &self.local_peer + ); + }); } /// Announce blocks that we have to an outbound peer that doesn't have them. From da16af9498dbc1d865b682fd176e4fcf8c74d8ad Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Tue, 28 Jan 2025 14:50:11 -0500 Subject: [PATCH 09/13] feat: allow other transactions with tenure extends We want the heuristic to be such that the miner mines block-found tenure changes quickly, including only the tenure change and the coinbase. Tenure extensions do not require this quick response, so blocks containing a tenure extend should also include other transactions. Fixes #5577 --- .github/workflows/bitcoin-tests.yml | 1 + stackslib/src/chainstate/stacks/miner.rs | 8 +- .../src/tests/nakamoto_integrations.rs | 12 +- testnet/stacks-node/src/tests/signer/v0.rs | 109 +++++++++++++++++- 4 files changed, 120 insertions(+), 10 deletions(-) diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index 363e02044f..21ad473af8 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -124,6 +124,7 @@ jobs: - tests::signer::v0::signing_in_0th_tenure_of_reward_cycle - tests::signer::v0::continue_after_tenure_extend - tests::signer::v0::tenure_extend_after_idle_signers + - tests::signer::v0::tenure_extend_with_other_transactions - tests::signer::v0::tenure_extend_after_idle_miner - tests::signer::v0::tenure_extend_after_failed_miner - tests::signer::v0::tenure_extend_succeeds_after_rejected_attempt diff --git a/stackslib/src/chainstate/stacks/miner.rs b/stackslib/src/chainstate/stacks/miner.rs index f4c7286f58..9e661e4460 100644 --- a/stackslib/src/chainstate/stacks/miner.rs +++ b/stackslib/src/chainstate/stacks/miner.rs @@ -2258,7 +2258,13 @@ impl StacksBlockBuilder { // nakamoto miner tenure start heuristic: // mine an empty block so you can start your tenure quickly! if let Some(tx) = initial_txs.first() { - if matches!(&tx.payload, TransactionPayload::TenureChange(_)) { + if matches!( + &tx.payload, + TransactionPayload::TenureChange(TenureChangePayload { + cause: TenureChangeCause::BlockFound, + ..
+ }) + ) { info!("Nakamoto miner heuristic: during tenure change blocks, produce a fast short block to begin tenure"); return Ok((false, tx_events)); } diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 30c3cfed3b..375de8a367 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -247,9 +247,15 @@ pub fn check_nakamoto_empty_block_heuristics() { continue; } let txs = test_observer::parse_transactions(block); - let has_tenure_change = txs - .iter() - .any(|tx| matches!(tx.payload, TransactionPayload::TenureChange(_))); + let has_tenure_change = txs.iter().any(|tx| { + matches!( + tx.payload, + TransactionPayload::TenureChange(TenureChangePayload { + cause: TenureChangeCause::BlockFound, + .. + }) + ) + }); if has_tenure_change { let only_coinbase_and_tenure_change = txs.iter().all(|tx| { matches!( diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index f9050644dc..3b963ba500 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -2616,6 +2616,51 @@ fn tenure_extend_after_idle_signers() { return; } + tracing_subscriber::registry() + .with(fmt::layer()) + .with(EnvFilter::from_default_env()) + .init(); + + info!("------------------------- Test Setup -------------------------"); + let num_signers = 5; + let idle_timeout = Duration::from_secs(30); + let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( + num_signers, + vec![], + |config| { + config.tenure_idle_timeout = idle_timeout; + }, + |_| {}, + None, + None, + ); + + signer_test.boot_to_epoch_3(); + + info!("---- Nakamoto booted, starting test ----"); + signer_test.mine_nakamoto_block(Duration::from_secs(30), true); + + info!("---- Waiting for a tenure extend ----"); + + // Now, wait for a block with a tenure extend + wait_for(idle_timeout.as_secs() + 10, || { + Ok(last_block_contains_tenure_change_tx( + TenureChangeCause::Extended, + )) + }) + .expect("Timed out waiting for a block with a tenure extend"); + + signer_test.shutdown(); +} + +#[test] +#[ignore] +/// This test verifies that a miner will include other transactions with a TenureExtend transaction. 
+fn tenure_extend_with_other_transactions() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + tracing_subscriber::registry() .with(fmt::layer()) .with(EnvFilter::from_default_env()) @@ -2627,7 +2672,7 @@ fn tenure_extend_after_idle_signers() { let sender_addr = tests::to_addr(&sender_sk); let send_amt = 100; let send_fee = 180; - let _recipient = PrincipalData::from(StacksAddress::burn_address(false)); + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); let idle_timeout = Duration::from_secs(30); let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( num_signers, @@ -2639,20 +2684,72 @@ fn tenure_extend_after_idle_signers() { None, None, ); - let _http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); + let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); signer_test.boot_to_epoch_3(); info!("---- Nakamoto booted, starting test ----"); signer_test.mine_nakamoto_block(Duration::from_secs(30), true); - info!("---- Waiting for a tenure extend ----"); + info!("Pause miner so it doesn't propose a block before the tenure extend"); + TEST_MINE_STALL.set(true); + + // Submit a transaction to be included with the tenure extend + let transfer_tx = make_stacks_transfer( + &sender_sk, + 0, + send_fee, + signer_test.running_nodes.conf.burnchain.chain_id, + &recipient, + send_amt, + ); + let _tx = submit_tx(&http_origin, &transfer_tx); + + info!("---- Wait for tenure extend timeout ----"); + + sleep_ms(idle_timeout.as_millis() as u64 + 1000); + + info!("---- Resume miner to propose a block with the tenure extend ----"); + TEST_MINE_STALL.set(false); // Now, wait for a block with a tenure extend wait_for(idle_timeout.as_secs() + 10, || { - Ok(last_block_contains_tenure_change_tx( - TenureChangeCause::Extended, - )) + let blocks = test_observer::get_blocks(); + let last_block = &blocks.last().unwrap(); + let transactions = last_block["transactions"].as_array().unwrap(); + let (first_tx, other_txs) = transactions.split_first().unwrap(); + let raw_tx = first_tx["raw_tx"].as_str().unwrap(); + let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); + let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); + let found_tenure_extend = match &parsed.payload { + TransactionPayload::TenureChange(payload) + if payload.cause == TenureChangeCause::Extended => + { + info!("Found tenure extend transaction: {parsed:?}"); + true + } + _ => false, + }; + if found_tenure_extend { + let found_transfer = other_txs.iter().any(|tx| { + let raw_tx = tx["raw_tx"].as_str().unwrap(); + let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); + let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); + match &parsed.payload { + TransactionPayload::TokenTransfer(..) 
=> true, + _ => false, + } + }); + if found_transfer { + info!("Found transfer transaction"); + Ok(true) + } else { + Err("No transfer transaction found together with the tenure extend".to_string()) + } + } else { + info!("No tenure change transaction found"); + Ok(false) + } }) .expect("Timed out waiting for a block with a tenure extend"); From 2272b8f787ffbc5cc205222c1ded3944066a3f64 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Tue, 28 Jan 2025 15:32:09 -0500 Subject: [PATCH 10/13] docs: add changelog entry --- CHANGELOG.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index d9631ccf65..226f7b5159 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,12 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to the versioning scheme outlined in the [README.md](README.md). +## [Unreleased] + +### Changed + +- Miner will include other transactions in blocks with tenure extend transactions (#5760) + ## [3.1.0.0.4] ### Added From 02d595c2b48ea52122307b7bede1691dc606e515 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Tue, 28 Jan 2025 16:46:06 -0500 Subject: [PATCH 11/13] test: fix `tests::nakamoto_integrations::continue_tenure_extend` --- testnet/stacks-node/src/tests/nakamoto_integrations.rs | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 375de8a367..f487333905 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -7206,7 +7206,9 @@ fn continue_tenure_extend() { let mut tenure_block_founds = vec![]; let mut transfer_tx_included = false; let mut last_block_had_extend = false; - for block in test_observer::get_blocks() { + for pair in test_observer::get_blocks().windows(2) { + let prev_block = &pair[0]; + let block = &pair[1]; let mut has_extend = false; for tx in block["transactions"].as_array().unwrap() { let raw_tx = tx["raw_tx"].as_str().unwrap(); @@ -7227,8 +7229,10 @@ fn continue_tenure_extend() { tenure_extends.push(parsed); } TenureChangeCause::BlockFound => { - if last_block_had_extend { - panic!("Expected a Nakamoto block to happen after tenure extend block"); + if last_block_had_extend + && prev_block["transactions"].as_array().unwrap().len() <= 1 + { + panic!("Expected other transactions to happen after tenure extend"); } tenure_block_founds.push(parsed); } From 1e5b6544e1bfd3563c662920b8566c38f0fed7d9 Mon Sep 17 00:00:00 2001 From: Brice Dobry Date: Tue, 28 Jan 2025 17:18:02 -0500 Subject: [PATCH 12/13] test: fix `tests::signer::v0::continue_after_fast_block_no_sortition` This test was not quite matching its description and the behavior changed a bit with the changes in this PR. This commit updates the test and the description. 
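For context on the assertions being renumbered below: the distinction these tests exercise is between the two `TenureChangeCause` variants. Only a `BlockFound` tenure change triggers the miner's fast-empty-block heuristic; an `Extended` tenure change may now share a block with ordinary transactions. A minimal sketch of that rule follows (the enum and payload shapes here are simplified stand-ins, not the actual `stackslib` definitions):

```rust
// Simplified stand-ins for the chainstate types; the real
// TransactionPayload and TenureChangeCause live in stackslib and
// carry more data (e.g. a full TenureChangePayload struct).
enum TenureChangeCause {
    BlockFound, // a new tenure begins after a winning sortition
    Extended,   // the current miner extends its ongoing tenure
}

enum TransactionPayload {
    TenureChange(TenureChangeCause),
    TokenTransfer,
}

/// The heuristic as changed by this PR: only a BlockFound tenure
/// change forces a fast, near-empty block; an Extended tenure change
/// may share its block with ordinary transactions.
fn must_mine_fast_empty_block(first_tx: &TransactionPayload) -> bool {
    matches!(
        first_tx,
        TransactionPayload::TenureChange(TenureChangeCause::BlockFound)
    )
}

fn main() {
    let block_found = TransactionPayload::TenureChange(TenureChangeCause::BlockFound);
    let extended = TransactionPayload::TenureChange(TenureChangeCause::Extended);
    let transfer = TransactionPayload::TokenTransfer;

    assert!(must_mine_fast_empty_block(&block_found));
    assert!(!must_mine_fast_empty_block(&extended)); // may carry other txs
    assert!(!must_mine_fast_empty_block(&transfer));
}
```

The real check in `miner.rs` (see [PATCH 09/13] above) inspects only the first transaction of the candidate block and matches on the full `TenureChangePayload` struct.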
---
 testnet/stacks-node/src/tests/signer/v0.rs | 72 +++++++++++-----------
 1 file changed, 35 insertions(+), 37 deletions(-)

diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs
index 3b963ba500..2bbcccaced 100644
--- a/testnet/stacks-node/src/tests/signer/v0.rs
+++ b/testnet/stacks-node/src/tests/signer/v0.rs
@@ -6535,19 +6535,22 @@ fn miner_recovers_when_broadcast_block_delay_across_tenures_occurs() {
 /// Mine 2 empty burn blocks (simulate fast blocks scenario)
 /// Miner 2 proposes block N+1 with a TenureChangePayload
 /// Signers accept and the stacks tip advances to N+1
-/// Miner 2 proposes block N+2 with a TokenTransfer
+/// Miner 2 proposes block N+2 with a TenureExtend
 /// Signers accept and the stacks tip advances to N+2
+/// Miner 2 proposes block N+3 with a TokenTransfer
+/// Signers accept and the stacks tip advances to N+3
 /// Mine an empty burn block
-/// Miner 2 proposes block N+3 with a TenureExtend
-/// Signers accept and the chain advances to N+3
-/// Miner 1 wins the next tenure and proposes a block N+4 with a TenureChangePayload
+/// Miner 2 proposes block N+4 with a TenureExtend
 /// Signers accept and the chain advances to N+4
+/// Miner 1 wins the next tenure and proposes a block N+5 with a TenureChangePayload
+/// Signers accept and the chain advances to N+5
 /// Asserts:
 /// - Block N+1 contains the TenureChangePayload
-/// - Block N+2 contains the TokenTransfer
-/// - Block N+3 contains the TenureExtend
-/// - Block N+4 contains the TenureChangePayload
-/// - The stacks tip advances to N+4
+/// - Block N+2 contains the TenureExtend
+/// - Block N+3 contains the TokenTransfer
+/// - Block N+4 contains the TenureExtend
+/// - Block N+5 contains the TenureChangePayload
+/// - The stacks tip advances to N+5
 #[test]
 #[ignore]
 fn continue_after_fast_block_no_sortition() {
@@ -6908,7 +6911,7 @@ fn continue_after_fast_block_no_sortition() {
     // Allow signers to respond to proposals again
     TEST_REJECT_ALL_BLOCK_PROPOSAL.set(Vec::new());

-    info!("------------------------- Wait for Miner B's Block N -------------------------";
+    info!("------------------------- Wait for Miner B's Block N+1 -------------------------";
         "blocks_processed_before_2" => %blocks_processed_before_2,
         "stacks_height_before" => %stacks_height_before,
         "nmb_old_blocks" => %nmb_old_blocks);
@@ -6923,7 +6926,7 @@ fn continue_after_fast_block_no_sortition() {
         let blocks_mined1_val = blocks_mined1.load(Ordering::SeqCst);
         let blocks_mined2_val = blocks_mined2.load(Ordering::SeqCst);

-        info!("Waiting for Miner B's Block N";
+        info!("Waiting for Miner B's Block N+1";
             "blocks_mined1_val" => %blocks_mined1_val,
             "blocks_mined2_val" => %blocks_mined2_val,
             "stacks_height" => %stacks_height,
@@ -6938,11 +6941,11 @@ fn continue_after_fast_block_no_sortition() {
     .expect("Timed out waiting for block to be mined and processed");

     info!(
-        "------------------------- Verify Tenure Change Tx in Miner B's Block N -------------------------"
+        "------------------------- Verify Tenure Change Tx in Miner B's Block N+1 -------------------------"
     );
     verify_last_block_contains_tenure_change_tx(TenureChangeCause::BlockFound);

-    info!("------------------------- Wait for Miner B's Block N+1 -------------------------");
+    info!("------------------------- Wait for Miner B's Block N+2 -------------------------");

     let nmb_old_blocks = test_observer::get_blocks().len();
     let blocks_processed_before_2 = blocks_mined2.load(Ordering::SeqCst);
@@ -6952,18 +6955,7 @@ fn continue_after_fast_block_no_sortition() {
         .expect("Failed to get peer info")
         .stacks_tip_height;

-    // submit a tx so that the miner will mine an extra block
-    let transfer_tx = make_stacks_transfer(
-        &sender_sk,
-        sender_nonce,
-        send_fee,
-        signer_test.running_nodes.conf.burnchain.chain_id,
-        &recipient,
-        send_amt,
-    );
-    submit_tx(&http_origin, &transfer_tx);
-
-    // wait for the tenure-extend block to be processed
+    // wait for the transfer block to be processed
     wait_for(30, || {
         let stacks_height = signer_test
             .stacks_client
             .get_peer_info()
             .expect("Failed to get peer info")
             .stacks_tip_height;
@@ -6978,8 +6970,12 @@ fn continue_after_fast_block_no_sortition() {
     })
     .expect("Timed out waiting for block to be mined and processed");

+    info!("------------------------- Verify Miner B's Block N+2 -------------------------");
+    verify_last_block_contains_tenure_change_tx(TenureChangeCause::Extended);
+    info!("------------------------- Wait for Miner B's Block N+3 -------------------------");
+
     let nmb_old_blocks = test_observer::get_blocks().len();
     let blocks_processed_before_2 = blocks_mined2.load(Ordering::SeqCst);
     let stacks_height_before = signer_test
@@ -6988,22 +6984,24 @@ fn continue_after_fast_block_no_sortition() {
         .expect("Failed to get peer info")
         .stacks_tip_height;

-    // wait for the new block with the STX transfer to be processed
+    // submit a tx so that the miner will mine an extra block
+    let transfer_tx = make_stacks_transfer(
+        &sender_sk,
+        sender_nonce,
+        send_fee,
+        signer_test.running_nodes.conf.burnchain.chain_id,
+        &recipient,
+        send_amt,
+    );
+    submit_tx(&http_origin, &transfer_tx);
+
+    // wait for the transfer block to be processed
     wait_for(30, || {
         let stacks_height = signer_test
             .stacks_client
             .get_peer_info()
             .expect("Failed to get peer info")
             .stacks_tip_height;
-
-        let blocks_mined1_val = blocks_mined1.load(Ordering::SeqCst);
-        let blocks_mined2_val = blocks_mined2.load(Ordering::SeqCst);
-        info!("Waiting for Miner B's Block N";
-            "blocks_mined1_val" => %blocks_mined1_val,
-            "blocks_mined2_val" => %blocks_mined2_val,
-            "stacks_height" => %stacks_height,
-            "observed_blocks" => %test_observer::get_blocks().len());
-
         Ok(
             blocks_mined2.load(Ordering::SeqCst) > blocks_processed_before_2
                 && stacks_height > stacks_height_before
@@ -7012,7 +7010,7 @@ fn continue_after_fast_block_no_sortition() {
     })
     .expect("Timed out waiting for block to be mined and processed");

-    info!("------------------------- Verify Miner B's Block N+1 -------------------------");
+    info!("------------------------- Verify Miner B's Block N+3 -------------------------");

     verify_last_block_contains_transfer_tx();

@@ -7029,7 +7027,7 @@ fn continue_after_fast_block_no_sortition() {
         .unwrap();
     btc_blocks_mined += 1;

-    info!("------------------------- Verify Miner B's Issues a Tenure Change Extend in Block N+2 -------------------------");
+    info!("------------------------- Verify Miner B's Issues a Tenure Change Extend in Block N+4 -------------------------");
     verify_last_block_contains_tenure_change_tx(TenureChangeCause::Extended);

     info!("------------------------- Unpause Miner A's Block Commits -------------------------");
@@ -7064,7 +7062,7 @@ fn continue_after_fast_block_no_sortition() {
     assert!(tip.sortition);
     assert_eq!(tip.miner_pk_hash.unwrap(), mining_pkh_1);

-    info!("------------------------- Verify Miner A's Issued a Tenure Change in Block N+4 -------------------------");
+    info!("------------------------- Verify Miner A's Issued a Tenure Change in Block N+5 -------------------------");
     verify_last_block_contains_tenure_change_tx(TenureChangeCause::BlockFound);
     info!(

From 9b0f23369b49366a4169043165553cd681aa4fa6 Mon Sep 17 00:00:00 2001
From: Hank Stoever
Date: Wed, 29 Jan 2025 07:38:12 -0800
Subject: [PATCH 13/13] fix: always send pending block validation after receiving a block validation result

---
 stacks-signer/src/v0/signer.rs | 47 +++++++++++++++++-----------------
 1 file changed, 24 insertions(+), 23 deletions(-)

diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs
index 70253f8258..abe05beb40 100644
--- a/stacks-signer/src/v0/signer.rs
+++ b/stacks-signer/src/v0/signer.rs
@@ -818,31 +818,32 @@ impl Signer {
             .remove_pending_block_validation(&signer_sig_hash)
             .unwrap_or_else(|e| warn!("{self}: Failed to remove pending block validation: {e:?}"));

-        let Some(response) = block_response else {
-            return;
-        };
-        // Submit a proposal response to the .signers contract for miners
-        info!(
-            "{self}: Broadcasting a block response to stacks node: {response:?}";
-        );
-        let accepted = matches!(response, BlockResponse::Accepted(..));
-        match self
-            .stackerdb
-            .send_message_with_retry::<SignerMessage>(response.into())
-        {
-            Ok(_) => {
-                crate::monitoring::actions::increment_block_responses_sent(accepted);
-                if let Ok(Some(block_info)) = self
-                    .signer_db
-                    .block_lookup(&block_validate_response.signer_signature_hash())
-                {
-                    crate::monitoring::actions::record_block_response_latency(&block_info.block);
+        if let Some(response) = block_response {
+            // Submit a proposal response to the .signers contract for miners
+            info!(
+                "{self}: Broadcasting a block response to stacks node: {response:?}";
+            );
+            let accepted = matches!(response, BlockResponse::Accepted(..));
+            match self
+                .stackerdb
+                .send_message_with_retry::<SignerMessage>(response.into())
+            {
+                Ok(_) => {
+                    crate::monitoring::actions::increment_block_responses_sent(accepted);
+                    if let Ok(Some(block_info)) = self
+                        .signer_db
+                        .block_lookup(&block_validate_response.signer_signature_hash())
+                    {
+                        crate::monitoring::actions::record_block_response_latency(
+                            &block_info.block,
+                        );
+                    }
+                }
+                Err(e) => {
+                    warn!("{self}: Failed to send block rejection to stacker-db: {e:?}",);
+                }
             }
         }
-            Err(e) => {
-                warn!("{self}: Failed to send block rejection to stacker-db: {e:?}",);
-            }
-        }
+        };

         // Check if there is a pending block validation that we need to submit to the node
         match self.signer_db.get_and_remove_pending_block_validation() {
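A note on the fix in PATCH 13/13: the old `let ... else` form returned
early whenever there was no block response to broadcast, which also
skipped the pending-block-validation check that follows it. The fix makes
only the broadcast conditional. A minimal, self-contained sketch of that
control-flow change, using hypothetical stand-in functions rather than the
actual stacks-signer API:

    // Sketch only: `broadcast_response` and `submit_next_pending_validation`
    // are hypothetical stand-ins, not the actual stacks-signer API.
    fn handle_validation_result(block_response: Option<String>) {
        // Before the fix (buggy shape):
        //     let Some(response) = block_response else {
        //         return; // early return skipped the pending-validation check too
        //     };
        //
        // After the fix, only the broadcast depends on having a response:
        if let Some(response) = block_response {
            broadcast_response(&response);
        }

        // This now runs for every validation result, per the commit subject.
        submit_next_pending_validation();
    }

    fn broadcast_response(response: &str) {
        println!("broadcasting block response: {response}");
    }

    fn submit_next_pending_validation() {
        println!("checking for a pending block validation to submit");
    }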