diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index 363e02044f..b07c0dc2e4 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -83,7 +83,8 @@ jobs: - tests::neon_integrations::start_stop_bitcoind - tests::should_succeed_handling_malformed_and_valid_txs - tests::nakamoto_integrations::simple_neon_integration - - tests::nakamoto_integrations::flash_blocks_on_epoch_3 + # Disable this flaky test. We don't need continue testing Epoch 2 -> 3 transition + # - tests::nakamoto_integrations::flash_blocks_on_epoch_3_FLAKY - tests::nakamoto_integrations::mine_multiple_per_tenure_integration - tests::nakamoto_integrations::block_proposal_api_endpoint - tests::nakamoto_integrations::miner_writes_proposed_block_to_stackerdb @@ -124,6 +125,7 @@ jobs: - tests::signer::v0::signing_in_0th_tenure_of_reward_cycle - tests::signer::v0::continue_after_tenure_extend - tests::signer::v0::tenure_extend_after_idle_signers + - tests::signer::v0::tenure_extend_with_other_transactions - tests::signer::v0::tenure_extend_after_idle_miner - tests::signer::v0::tenure_extend_after_failed_miner - tests::signer::v0::tenure_extend_succeeds_after_rejected_attempt @@ -133,6 +135,7 @@ jobs: - tests::signer::v0::block_commit_delay - tests::signer::v0::continue_after_fast_block_no_sortition - tests::signer::v0::block_validation_response_timeout + - tests::signer::v0::block_validation_check_rejection_timeout_heuristic - tests::signer::v0::block_validation_pending_table - tests::signer::v0::new_tenure_while_validating_previous_scenario - tests::signer::v0::tenure_extend_after_bad_commit @@ -169,6 +172,7 @@ jobs: - tests::nakamoto_integrations::v3_blockbyheight_api_endpoint - tests::nakamoto_integrations::mine_invalid_principal_from_consensus_buff - tests::nakamoto_integrations::test_tenure_extend_from_flashblocks + - tests::nakamoto_integrations::restarting_miner # TODO: enable these once v1 signer is supported by a new nakamoto 
epoch # - tests::signer::v1::dkg # - tests::signer::v1::sign_request_rejected diff --git a/CHANGELOG.md b/CHANGELOG.md index d9631ccf65..64f0bc2164 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,6 +5,23 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to the versioning scheme outlined in the [README.md](README.md). +## [Unreleased] + +### Added + +- Add miner configuration option `tenure_extend_cost_threshold` to specify the percentage of the tenure budget that must be spent before a time-based tenure extend is attempted + +### Changed + +- Miner will include other transactions in blocks with tenure extend transactions (#5760) +- Add `block_rejection_timeout_steps` to miner configuration for defining rejections-based timeouts while waiting for signers response (#5705) +- Miner will not issue a tenure extend until at least half of the block budget has been spent (#5757) + +### Fixed + +- Miners who restart their nodes immediately before a winning tenure now correctly detect that + they won the tenure after their nodes restart ([#5750](https://github.com/stacks-network/stacks-core/issues/5750)). 
+ ## [3.1.0.0.4] ### Added diff --git a/Cargo.lock b/Cargo.lock index 5569bf1f88..9a39c4c10b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -606,7 +606,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.58", ] [[package]] @@ -620,7 +620,7 @@ name = "clarity" version = "0.0.1" dependencies = [ "assert-json-diff 1.1.0", - "hashbrown 0.14.3", + "hashbrown 0.15.2", "integer-sqrt", "lazy_static", "mutants", @@ -784,16 +784,15 @@ dependencies = [ [[package]] name = "curve25519-dalek" -version = "4.1.2" +version = "4.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a677b8922c94e01bdbb12126b0bc852f00447528dee1782229af9c720c3f348" +checksum = "97fb8b7c4503de7d6ae7b42ab72a5a59857b4c937ec27a3d4539dba95b5ab2be" dependencies = [ "cfg-if 1.0.0", "cpufeatures", "curve25519-dalek-derive", "digest 0.10.7", "fiat-crypto", - "platforms", "rustc_version 0.4.0", "subtle", "zeroize", @@ -807,7 +806,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.58", ] [[package]] @@ -907,7 +906,7 @@ version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4a3daa8e81a3963a60642bcc1f90a670680bd4a77535faa384e9d1c79d620871" dependencies = [ - "curve25519-dalek 4.1.2", + "curve25519-dalek 4.1.3", "ed25519", "rand_core 0.6.4", "serde", @@ -1040,6 +1039,12 @@ version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" +[[package]] +name = "foldhash" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a0d2fde1f7b3d48b8395d5f2de76c18a528bd6a9cdde438df747bfcba3e05d6f" + [[package]] name = "form_urlencoded" version = "1.2.1" @@ -1082,9 +1087,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.30" +version = "0.3.31" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "eac8f7d7865dcb88bd4373ab671c8cf4508703796caa2b1985a9ca867b3fcb78" +checksum = "2dff15bf788c671c1934e366d07e30c1814a8ef514e1af724a602e8a2fbe1b10" dependencies = [ "futures-core", "futures-sink", @@ -1092,9 +1097,9 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dfc6580bb841c5a68e9ef15c77ccc837b40a7504914d52e47b8b0e9bbda25a1d" +checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e" [[package]] name = "futures-executor" @@ -1109,9 +1114,9 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a44623e20b9681a318efdd71c299b6b222ed6f231972bfe2f224ebad6311f0c1" +checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6" [[package]] name = "futures-lite" @@ -1143,26 +1148,26 @@ dependencies = [ [[package]] name = "futures-macro" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" +checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.58", ] [[package]] name = "futures-sink" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fb8e00e87438d937621c1c6269e53f536c14d3fbd6a042bb24879e57d474fb5" +checksum = "e575fab7d1e0dcb8d0c7bcf9a63ee213816ab51902e6d244a95819acacf1d4f7" [[package]] name = "futures-task" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38d84fa142264698cdce1a9f9172cf383a0c82de1bddcf3092901442c4097004" +checksum = 
"f90f7dce0722e95104fcb095585910c0977252f286e354b5e3bd38902cd99988" [[package]] name = "futures-timer" @@ -1172,9 +1177,9 @@ checksum = "e64b03909df88034c26dc1547e8970b91f98bdb65165d6a4e9110d94263dbb2c" [[package]] name = "futures-util" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d6401deb83407ab3da39eba7e33987a73c3df0c82b4bb5813ee871c19c41d48" +checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81" dependencies = [ "futures-channel", "futures-core", @@ -1259,9 +1264,9 @@ dependencies = [ [[package]] name = "h2" -version = "0.3.24" +version = "0.3.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb2c4422095b67ee78da96fbb51a4cc413b3b25883c7717ff7ca1ab31022c9c9" +checksum = "81fe527a889e1532da5c525686d96d4c2e74cdd345badf8dfef9f6b39dd5f5e8" dependencies = [ "bytes", "fnv", @@ -1302,8 +1307,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "290f1a1d9242c78d09ce40a5e87e7554ee637af1351968159f4952f028f75604" dependencies = [ "ahash", - "allocator-api2", - "serde", ] [[package]] @@ -1311,6 +1314,12 @@ name = "hashbrown" version = "0.15.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bf151400ff0baff5465007dd2f3e717f3fe502074ca563069ce3a6629d07b289" +dependencies = [ + "allocator-api2", + "equivalent", + "foldhash", + "serde", +] [[package]] name = "hashlink" @@ -1486,14 +1495,14 @@ dependencies = [ "futures-channel", "futures-core", "futures-util", - "h2 0.3.24", + "h2 0.3.26", "http 0.2.11", "http-body 0.4.6", "httparse", "httpdate", "itoa", "pin-project-lite", - "socket2 0.5.5", + "socket2 0.4.10", "tokio", "tower-service", "tracing", @@ -1733,7 +1742,7 @@ name = "libsigner" version = "0.0.1" dependencies = [ "clarity", - "hashbrown 0.14.3", + "hashbrown 0.15.2", "lazy_static", "libc", "libstackerdb", @@ -1881,9 +1890,9 @@ dependencies = [ [[package]] name = "mio" -version = "0.8.10" 
+version = "0.8.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f3d0b296e374a4e6f3c7b0a1f5a51d748a0d34c85e7dc48fc3fa9a87657fe09" +checksum = "a4a650543ca06a924e8b371db273b2756685faae30f8487da1b56505a8f78b0c" dependencies = [ "libc", "wasi 0.11.0+wasi-snapshot-preview1", @@ -2103,7 +2112,7 @@ checksum = "266c042b60c9c76b8d53061e52b2e0d1116abc57cefc8c5cd671619a56ac3690" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.58", ] [[package]] @@ -2145,12 +2154,6 @@ version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d231b230927b5e4ad203db57bbcbee2802f6bce620b1e4a9024a07d94e2907ec" -[[package]] -name = "platforms" -version = "3.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "626dec3cac7cc0e1577a2ec3fc496277ec2baa084bebad95bb6fdbfae235f84c" - [[package]] name = "polling" version = "2.8.0" @@ -2439,7 +2442,7 @@ checksum = "c08c74e62047bb2de4ff487b251e4a92e24f48745648451635cec7d591162d9f" name = "relay-server" version = "0.0.1" dependencies = [ - "hashbrown 0.14.3", + "hashbrown 0.15.2", ] [[package]] @@ -2453,7 +2456,7 @@ dependencies = [ "encoding_rs", "futures-core", "futures-util", - "h2 0.3.24", + "h2 0.3.26", "http 0.2.11", "http-body 0.4.6", "hyper 0.14.28", @@ -2627,9 +2630,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.21.10" +version = "0.21.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9d5a6813c0759e4609cd494e8e725babae6a2ca7b62a5536a13daaec6fcb7ba" +checksum = "3f56a14d1f48b391359b22f731fd4bd7e43c97f3c50eee276f3aa09c94784d3e" dependencies = [ "log", "ring", @@ -2762,7 +2765,7 @@ checksum = "33c85360c95e7d137454dc81d9a4ed2b8efd8fbe19cee57357b32b9771fccb67" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.58", ] [[package]] @@ -2831,7 +2834,7 @@ checksum = "5d69265a08751de7844521fd15003ae0a888e035773ba05695c5c759a6f89eef" dependencies = [ "proc-macro2", "quote", - 
"syn 2.0.48", + "syn 2.0.58", ] [[package]] @@ -3042,7 +3045,7 @@ dependencies = [ "chrono", "curve25519-dalek 2.0.0", "ed25519-dalek", - "hashbrown 0.14.3", + "hashbrown 0.15.2", "lazy_static", "libc", "nix", @@ -3073,7 +3076,7 @@ dependencies = [ "base64 0.12.3", "chrono", "clarity", - "hashbrown 0.14.3", + "hashbrown 0.15.2", "http-types", "lazy_static", "libc", @@ -3113,7 +3116,7 @@ dependencies = [ "backoff", "clap", "clarity", - "hashbrown 0.14.3", + "hashbrown 0.15.2", "lazy_static", "libsigner", "libstackerdb", @@ -3150,7 +3153,7 @@ dependencies = [ "clarity", "curve25519-dalek 2.0.0", "ed25519-dalek", - "hashbrown 0.14.3", + "hashbrown 0.15.2", "integer-sqrt", "lazy_static", "libc", @@ -3288,9 +3291,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.48" +version = "2.0.58" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f3531638e407dfc0814761abb7c00a5b54992b849452a0646b7f65c9f770f3f" +checksum = "44cfb93f38070beee36b3fef7d4f5a16f27751d94b187b666a5cc5e9b0d30687" dependencies = [ "proc-macro2", "quote", @@ -3365,7 +3368,7 @@ checksum = "ae71770322cbd277e69d762a16c444af02aa0575ac0d174f0b9562d3b37f8602" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.58", ] [[package]] @@ -3505,7 +3508,7 @@ dependencies = [ "backtrace", "bytes", "libc", - "mio 0.8.10", + "mio 0.8.11", "num_cpus", "parking_lot", "pin-project-lite", @@ -3595,7 +3598,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.58", ] [[package]] @@ -3841,7 +3844,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.58", "wasm-bindgen-shared", ] @@ -3875,7 +3878,7 @@ checksum = "642f325be6301eb8107a83d12a8ac6c1e1c54345a7ef1a9261962dfefda09e66" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.58", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -4114,7 +4117,7 @@ checksum = 
"9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.58", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 194e946ef4..3b9486b61d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -15,8 +15,8 @@ members = [ # Dependencies we want to keep the same between workspace members [workspace.dependencies] ed25519-dalek = { version = "2.1.1", features = ["serde", "rand_core"] } -hashbrown = { version = "0.14.3", features = ["serde"] } -rand_core = "0.6" +hashbrown = { version = "0.15.2", features = ["serde"] } +rand_core = "0.6.4" rand = "0.8" rand_chacha = "0.3.1" tikv-jemallocator = "0.5.4" diff --git a/stacks-common/src/address/b58.rs b/stacks-common/src/address/b58.rs index 6a135392e5..ffba441de6 100644 --- a/stacks-common/src/address/b58.rs +++ b/stacks-common/src/address/b58.rs @@ -14,7 +14,7 @@ //! Base58 encoder and decoder -use std::{error, fmt, str}; +use std::{fmt, str}; use crate::address::Error; use crate::util::hash::DoubleSha256; diff --git a/stacks-common/src/address/mod.rs b/stacks-common/src/address/mod.rs index 381456f661..8377d0087d 100644 --- a/stacks-common/src/address/mod.rs +++ b/stacks-common/src/address/mod.rs @@ -19,7 +19,7 @@ use std::{error, fmt}; use sha2::{Digest, Sha256}; use crate::deps_common::bitcoin::blockdata::opcodes::All as btc_opcodes; -use crate::deps_common::bitcoin::blockdata::script::{Builder, Instruction, Script}; +use crate::deps_common::bitcoin::blockdata::script::Builder; use crate::types::PublicKey; use crate::util::hash::Hash160; @@ -220,7 +220,6 @@ pub fn public_keys_to_address_hash( mod test { use super::*; use crate::util::hash::*; - use crate::util::log; use crate::util::secp256k1::Secp256k1PublicKey as PubKey; struct PubkeyFixture { diff --git a/stacks-common/src/bitvec.rs b/stacks-common/src/bitvec.rs index 7c77e5da32..065dd5e814 100644 --- a/stacks-common/src/bitvec.rs +++ b/stacks-common/src/bitvec.rs @@ -260,7 +260,6 @@ mod test { use 
super::BitVec; use crate::codec::StacksMessageCodec; - use crate::util::hash::to_hex; fn check_set_get(mut input: BitVec<{ u16::MAX }>) { let original_input = input.clone(); diff --git a/stacks-common/src/deps_common/bitcoin/blockdata/block.rs b/stacks-common/src/deps_common/bitcoin/blockdata/block.rs index af064511b5..9a797fd846 100644 --- a/stacks-common/src/deps_common/bitcoin/blockdata/block.rs +++ b/stacks-common/src/deps_common/bitcoin/blockdata/block.rs @@ -25,7 +25,6 @@ use crate::deps_common::bitcoin::blockdata::transaction::Transaction; use crate::deps_common::bitcoin::network::constants::Network; use crate::deps_common::bitcoin::network::encodable::VarInt; use crate::deps_common::bitcoin::network::serialize::BitcoinHash; -use crate::deps_common::bitcoin::util; use crate::deps_common::bitcoin::util::hash::Sha256dHash; use crate::deps_common::bitcoin::util::Error; use crate::deps_common::bitcoin::util::Error::{SpvBadProofOfWork, SpvBadTarget}; diff --git a/stacks-common/src/deps_common/bitcoin/blockdata/script.rs b/stacks-common/src/deps_common/bitcoin/blockdata/script.rs index 34ee5897c3..cf0e3296b1 100644 --- a/stacks-common/src/deps_common/bitcoin/blockdata/script.rs +++ b/stacks-common/src/deps_common/bitcoin/blockdata/script.rs @@ -27,7 +27,6 @@ use std::mem::size_of; use std::{error, fmt}; -use serde; use sha2::{Digest, Sha256}; use crate::deps_common::bitcoin::blockdata::opcodes; diff --git a/stacks-common/src/deps_common/bitcoin/blockdata/transaction.rs b/stacks-common/src/deps_common/bitcoin/blockdata/transaction.rs index c2d4c4e0a2..6dbf49bd5d 100644 --- a/stacks-common/src/deps_common/bitcoin/blockdata/transaction.rs +++ b/stacks-common/src/deps_common/bitcoin/blockdata/transaction.rs @@ -34,7 +34,6 @@ use crate::deps_common::bitcoin::network::serialize::{ self, serialize, BitcoinHash, SimpleDecoder, SimpleEncoder, }; use crate::deps_common::bitcoin::util::hash::Sha256dHash; -use crate::util::hash::to_hex; /// A reference to a transaction output 
#[derive(Copy, Clone, Debug, Eq, Hash, PartialEq, PartialOrd, Ord)] @@ -675,7 +674,7 @@ impl SigHashType { #[cfg(test)] mod tests { - use super::{SigHashType, Transaction, TxIn}; + use super::{Transaction, TxIn}; use crate::deps_common; use crate::deps_common::bitcoin::blockdata::script::Script; use crate::deps_common::bitcoin::network::serialize::{deserialize, BitcoinHash}; @@ -690,7 +689,6 @@ mod tests { #[test] fn test_is_coinbase() { - use crate::deps_common::bitcoin::blockdata::constants; use crate::deps_common::bitcoin::network::constants::Network; let genesis = deps_common::bitcoin::blockdata::constants::genesis_block(Network::Bitcoin); diff --git a/stacks-common/src/deps_common/bitcoin/network/message_network.rs b/stacks-common/src/deps_common/bitcoin/network/message_network.rs index 0cf486ba85..a42eb47aea 100644 --- a/stacks-common/src/deps_common/bitcoin/network/message_network.rs +++ b/stacks-common/src/deps_common/bitcoin/network/message_network.rs @@ -19,8 +19,6 @@ //! use crate::deps_common::bitcoin::network::address::Address; -use crate::deps_common::bitcoin::network::constants; -use crate::util; // Some simple messages diff --git a/stacks-common/src/deps_common/bitcoin/util/hash.rs b/stacks-common/src/deps_common/bitcoin/util/hash.rs index e1a9455e99..abfce8349f 100644 --- a/stacks-common/src/deps_common/bitcoin/util/hash.rs +++ b/stacks-common/src/deps_common/bitcoin/util/hash.rs @@ -18,7 +18,7 @@ use std::char::from_digit; use std::cmp::min; use std::io::{Cursor, Write}; -use std::{error, fmt, mem}; +use std::{fmt, mem}; use ripemd::Ripemd160; #[cfg(feature = "serde")] diff --git a/stacks-common/src/deps_common/httparse/mod.rs b/stacks-common/src/deps_common/httparse/mod.rs index b4c9250546..364fe0f8a7 100644 --- a/stacks-common/src/deps_common/httparse/mod.rs +++ b/stacks-common/src/deps_common/httparse/mod.rs @@ -30,7 +30,7 @@ //! Originally written by Sean McArthur. //! //! Modified by Jude Nelson to remove all unsafe code. 
-use std::{error, fmt, mem, result, str}; +use std::{fmt, mem, result, str}; macro_rules! next { ($bytes:ident) => {{ @@ -1282,8 +1282,6 @@ mod tests { #[test] fn test_std_error() { - use std::error::Error as StdError; - use super::Error; let err = Error::HeaderName; assert_eq!(err.to_string(), err.description_str()); diff --git a/stacks-common/src/libcommon.rs b/stacks-common/src/libcommon.rs index 04c3acc1ea..34705bebda 100644 --- a/stacks-common/src/libcommon.rs +++ b/stacks-common/src/libcommon.rs @@ -1,5 +1,4 @@ #![allow(unused_macros)] -#![allow(unused_imports)] #![allow(dead_code)] #![allow(non_camel_case_types)] #![allow(non_snake_case)] @@ -7,6 +6,7 @@ #![cfg_attr(test, allow(unused_variables, unused_assignments))] #![allow(clippy::assertions_on_constants)] +#[allow(unused_imports)] #[macro_use(o, slog_log, slog_trace, slog_debug, slog_info, slog_warn, slog_error)] extern crate slog; @@ -33,8 +33,6 @@ pub mod deps_common; pub mod bitvec; -use crate::types::chainstate::{BlockHeaderHash, BurnchainHeaderHash, SortitionId, StacksBlockId}; - pub mod consts { use crate::types::chainstate::{BlockHeaderHash, ConsensusHash}; pub use crate::types::MINING_COMMITMENT_WINDOW; diff --git a/stacks-common/src/types/chainstate.rs b/stacks-common/src/types/chainstate.rs index 59052ff3a9..f364800321 100644 --- a/stacks-common/src/types/chainstate.rs +++ b/stacks-common/src/types/chainstate.rs @@ -14,24 +14,19 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
-use std::fmt::{self, Display}; +use std::fmt; use std::io::{Read, Write}; use std::str::FromStr; -use curve25519_dalek::digest::Digest; -use rand::{Rng, SeedableRng}; -use serde::de::{Deserialize, Error as de_Error}; -use serde::ser::Error as ser_Error; use serde::Serialize; -use sha2::{Digest as Sha2Digest, Sha256, Sha512_256}; +use sha2::{Digest as Sha2Digest, Sha512_256}; use crate::address::Error as AddressError; use crate::codec::{read_next, write_next, Error as CodecError, StacksMessageCodec}; use crate::consts::{FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH}; use crate::deps_common::bitcoin::util::hash::Sha256dHash; -use crate::util::hash::{to_hex, DoubleSha256, Hash160, Sha512Trunc256Sum, HASH160_ENCODED_SIZE}; +use crate::util::hash::{Hash160, Sha512Trunc256Sum, HASH160_ENCODED_SIZE}; use crate::util::secp256k1::{MessageSignature, Secp256k1PrivateKey, Secp256k1PublicKey}; -use crate::util::uint::Uint256; use crate::util::vrf::{VRFProof, VRF_PROOF_ENCODED_SIZE}; pub type StacksPublicKey = Secp256k1PublicKey; @@ -514,6 +509,8 @@ impl BurnchainHeaderHash { index_root: &TrieHash, noise: u64, ) -> BurnchainHeaderHash { + use crate::util::hash::DoubleSha256; + let mut bytes = vec![]; bytes.extend_from_slice(&block_height.to_be_bytes()); bytes.extend_from_slice(index_root.as_bytes()); diff --git a/stacks-common/src/types/mod.rs b/stacks-common/src/types/mod.rs index b0f1127fa9..3cb4a94fac 100644 --- a/stacks-common/src/types/mod.rs +++ b/stacks-common/src/types/mod.rs @@ -14,7 +14,6 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
-use std::cell::LazyCell; use std::cmp::Ordering; use std::fmt; use std::ops::{Deref, DerefMut, Index, IndexMut}; @@ -30,7 +29,6 @@ use crate::address::{ C32_ADDRESS_VERSION_TESTNET_MULTISIG, C32_ADDRESS_VERSION_TESTNET_SINGLESIG, }; use crate::consts::MICROSTACKS_PER_STACKS; -use crate::deps_common::bitcoin::blockdata::transaction::TxOut; use crate::types::chainstate::{StacksAddress, StacksPublicKey}; use crate::util::hash::Hash160; use crate::util::secp256k1::{MessageSignature, Secp256k1PublicKey}; diff --git a/stacks-common/src/util/chunked_encoding.rs b/stacks-common/src/util/chunked_encoding.rs index d6771e2218..fd0fd22492 100644 --- a/stacks-common/src/util/chunked_encoding.rs +++ b/stacks-common/src/util/chunked_encoding.rs @@ -445,9 +445,10 @@ impl Write for HttpChunkedTransferWriter<'_, '_, W> { } } +#[cfg(test)] mod test { use std::io; - use std::io::{Read, Write}; + use std::io::Read; use rand::RngCore; diff --git a/stacks-common/src/util/db.rs b/stacks-common/src/util/db.rs index 53564af597..3a463df4f8 100644 --- a/stacks-common/src/util/db.rs +++ b/stacks-common/src/util/db.rs @@ -17,7 +17,7 @@ use std::backtrace::Backtrace; use std::sync::{LazyLock, Mutex}; use std::thread; -use std::time::{Duration, Instant}; +use std::time::Instant; use hashbrown::HashMap; use rand::{thread_rng, Rng}; diff --git a/stacks-common/src/util/hash.rs b/stacks-common/src/util/hash.rs index 666e72c8e2..85f357d21a 100644 --- a/stacks-common/src/util/hash.rs +++ b/stacks-common/src/util/hash.rs @@ -20,7 +20,6 @@ use std::{fmt, mem}; use ripemd::Ripemd160; use serde::de::{Deserialize, Error as de_Error}; -use serde::ser::Error as ser_Error; use serde::Serialize; use sha2::{Digest, Sha256, Sha512, Sha512_256}; use sha3::Keccak256; @@ -29,7 +28,7 @@ use crate::types::StacksPublicKeyBuffer; use crate::util::pair::*; use crate::util::secp256k1::Secp256k1PublicKey; use crate::util::uint::Uint256; -use crate::util::{log, HexError}; +use crate::util::HexError; // hash function for 
Merkle trees pub trait MerkleHashFunc { @@ -659,9 +658,7 @@ pub fn bytes_to_hex(s: &[u8]) -> String { #[cfg(test)] mod test { - use super::{ - bin_bytes, hex_bytes, to_bin, DoubleSha256, MerkleHashFunc, MerklePath, MerkleTree, - }; + use super::{bin_bytes, hex_bytes, to_bin, DoubleSha256, MerkleHashFunc, MerkleTree}; struct MerkleTreeFixture { data: Vec>, diff --git a/stacks-common/src/util/log.rs b/stacks-common/src/util/log.rs index b0ac704f0c..77a4950f81 100644 --- a/stacks-common/src/util/log.rs +++ b/stacks-common/src/util/log.rs @@ -15,13 +15,12 @@ // along with this program. If not, see . use std::io::Write; -use std::sync::Mutex; use std::time::{Duration, SystemTime}; use std::{env, io, thread}; use chrono::prelude::*; use lazy_static::lazy_static; -use slog::{BorrowedKV, Drain, FnValue, Level, Logger, OwnedKVList, Record, KV}; +use slog::{Drain, Level, Logger, OwnedKVList, Record, KV}; use slog_term::{CountingWriter, Decorator, RecordDecorator, Serializer}; lazy_static! { @@ -191,6 +190,10 @@ impl TermFormat { #[cfg(feature = "slog_json")] fn make_json_logger() -> Logger { + use std::sync::Mutex; + + use slog::FnValue; + let def_keys = o!("file" => FnValue(move |info| { info.file() }), diff --git a/stacks-common/src/util/mod.rs b/stacks-common/src/util/mod.rs index 95ca7eeec0..46158d2f4f 100644 --- a/stacks-common/src/util/mod.rs +++ b/stacks-common/src/util/mod.rs @@ -28,15 +28,15 @@ pub mod secp256k1; pub mod uint; pub mod vrf; -use std::collections::HashMap; use std::fs::File; use std::io::{BufReader, BufWriter, Write}; -use std::path::{Path, PathBuf}; +use std::path::Path; use std::time::{SystemTime, UNIX_EPOCH}; use std::{error, fmt, thread, time}; /// Given a relative path inside the Cargo workspace, return the absolute path -pub fn cargo_workspace

(relative_path: P) -> PathBuf +#[cfg(any(test, feature = "testing"))] +pub fn cargo_workspace

(relative_path: P) -> std::path::PathBuf where P: AsRef, { diff --git a/stacks-common/src/util/pipe.rs b/stacks-common/src/util/pipe.rs index 86d92abd61..4407fee71f 100644 --- a/stacks-common/src/util/pipe.rs +++ b/stacks-common/src/util/pipe.rs @@ -21,8 +21,6 @@ use std::io; use std::io::{Read, Write}; use std::sync::mpsc::{sync_channel, Receiver, SyncSender, TryRecvError, TrySendError}; -use crate::util::log; - /// Inter-thread pipe for streaming messages, built on channels. /// Used mainly in conjunction with networking. /// @@ -316,7 +314,6 @@ impl Write for PipeWrite { #[cfg(test)] mod test { - use std::io::prelude::*; use std::io::{Read, Write}; use std::{io, thread}; @@ -324,7 +321,6 @@ mod test { use rand::RngCore; use super::*; - use crate::util::*; #[test] fn test_connection_pipe_oneshot() { diff --git a/stacks-common/src/util/retry.rs b/stacks-common/src/util/retry.rs index e7f6c0b140..47801289a3 100644 --- a/stacks-common/src/util/retry.rs +++ b/stacks-common/src/util/retry.rs @@ -18,11 +18,7 @@ */ use std::io; -use std::io::prelude::*; -use std::io::{Read, Write}; - -use crate::util::hash::to_hex; -use crate::util::log; +use std::io::Read; /// Wrap a Read so that we store a copy of what was read. /// Used for re-trying reads when we don't know what to expect from the stream. diff --git a/stacks-common/src/util/secp256k1.rs b/stacks-common/src/util/secp256k1.rs index e569a8ba0d..5d99b2c663 100644 --- a/stacks-common/src/util/secp256k1.rs +++ b/stacks-common/src/util/secp256k1.rs @@ -13,7 +13,7 @@ // // You should have received a copy of the GNU General Public License // along with this program. If not, see . 
-use rand::{thread_rng, RngCore}; +use rand::RngCore; use secp256k1; use secp256k1::ecdsa::{ RecoverableSignature as LibSecp256k1RecoverableSignature, RecoveryId as LibSecp256k1RecoveryID, @@ -24,11 +24,9 @@ use secp256k1::{ PublicKey as LibSecp256k1PublicKey, Secp256k1, SecretKey as LibSecp256k1PrivateKey, }; use serde::de::{Deserialize, Error as de_Error}; -use serde::ser::Error as ser_Error; use serde::Serialize; use super::hash::Sha256Sum; -use crate::impl_byte_array_message_codec; use crate::types::{PrivateKey, PublicKey}; use crate::util::hash::{hex_bytes, to_hex}; @@ -436,8 +434,8 @@ mod tests { use secp256k1::{PublicKey as LibSecp256k1PublicKey, Secp256k1}; use super::*; + use crate::util::get_epoch_time_ms; use crate::util::hash::hex_bytes; - use crate::util::{get_epoch_time_ms, log}; struct KeyFixture { input: I, diff --git a/stacks-common/src/util/vrf.rs b/stacks-common/src/util/vrf.rs index 0c2b2c3dad..5c7439daf9 100644 --- a/stacks-common/src/util/vrf.rs +++ b/stacks-common/src/util/vrf.rs @@ -22,16 +22,11 @@ use std::fmt::Debug; use std::hash::{Hash, Hasher}; /// This codebase is based on routines defined in the IETF draft for verifiable random functions /// over elliptic curves (https://tools.ietf.org/id/draft-irtf-cfrg-vrf-02.html). 
-use std::ops::Deref; -use std::ops::DerefMut; use std::{error, fmt}; use curve25519_dalek::constants::ED25519_BASEPOINT_POINT; use curve25519_dalek::edwards::{CompressedEdwardsY, EdwardsPoint}; use curve25519_dalek::scalar::Scalar as ed25519_Scalar; -use ed25519_dalek::{ - SecretKey as EdDalekSecretKeyBytes, SigningKey as EdPrivateKey, VerifyingKey as EdPublicKey, -}; use rand; use sha2::{Digest, Sha512}; @@ -535,10 +530,8 @@ impl VRF { #[cfg(test)] mod tests { - use curve25519_dalek::scalar::Scalar as ed25519_Scalar; use rand; use rand::RngCore; - use sha2::Sha512; use super::*; use crate::util::hash::hex_bytes; diff --git a/stacks-signer/CHANGELOG.md b/stacks-signer/CHANGELOG.md index 2697d93508..df30e0d0db 100644 --- a/stacks-signer/CHANGELOG.md +++ b/stacks-signer/CHANGELOG.md @@ -5,6 +5,16 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to the versioning scheme outlined in the [README.md](README.md). +## [Unreleased] + +### Added + +- Add `dry_run` configuration option to `stacks-signer` config toml. Dry run mode will + run the signer binary as if it were a registered signer. Instead of broadcasting + `StackerDB` messages, it logs `INFO` messages. Other interactions with the `stacks-node` + behave normally (e.g., submitting validation requests, submitting finished blocks). A + dry run signer will error out if the supplied key is actually a registered signer. 
+ ## [3.1.0.0.4.0] ## Added diff --git a/stacks-signer/src/client/mod.rs b/stacks-signer/src/client/mod.rs index a3d9bed159..8e163ac319 100644 --- a/stacks-signer/src/client/mod.rs +++ b/stacks-signer/src/client/mod.rs @@ -144,7 +144,7 @@ pub(crate) mod tests { use stacks_common::util::hash::{Hash160, Sha256Sum}; use super::*; - use crate::config::{GlobalConfig, SignerConfig}; + use crate::config::{GlobalConfig, SignerConfig, SignerConfigMode}; pub struct MockServerClient { pub server: TcpListener, @@ -393,8 +393,10 @@ pub(crate) mod tests { } SignerConfig { reward_cycle, - signer_id: 0, - signer_slot_id: SignerSlotID(rand::thread_rng().gen_range(0..num_signers)), // Give a random signer slot id between 0 and num_signers + signer_mode: SignerConfigMode::Normal { + signer_id: 0, + signer_slot_id: SignerSlotID(rand::thread_rng().gen_range(0..num_signers)), // Give a random signer slot id between 0 and num_signers + }, signer_entries: SignerEntries { signer_addr_to_id, signer_id_to_pk, diff --git a/stacks-signer/src/client/stackerdb.rs b/stacks-signer/src/client/stackerdb.rs index dc6525b144..81799dcc88 100644 --- a/stacks-signer/src/client/stackerdb.rs +++ b/stacks-signer/src/client/stackerdb.rs @@ -19,12 +19,13 @@ use clarity::codec::read_next; use hashbrown::HashMap; use libsigner::{MessageSlotID, SignerMessage, SignerSession, StackerDBSession}; use libstackerdb::{StackerDBChunkAckData, StackerDBChunkData}; -use slog::{slog_debug, slog_warn}; +use slog::{slog_debug, slog_info, slog_warn}; use stacks_common::types::chainstate::StacksPrivateKey; -use stacks_common::{debug, warn}; +use stacks_common::util::hash::to_hex; +use stacks_common::{debug, info, warn}; use crate::client::{retry_with_exponential_backoff, ClientError}; -use crate::config::SignerConfig; +use crate::config::{SignerConfig, SignerConfigMode}; /// The signer StackerDB slot ID, purposefully wrapped to prevent conflation with SignerID #[derive(Debug, Clone, PartialEq, Eq, Hash, Copy, PartialOrd, Ord)] 
@@ -36,6 +37,12 @@ impl std::fmt::Display for SignerSlotID { } } +#[derive(Debug)] +enum StackerDBMode { + DryRun, + Normal { signer_slot_id: SignerSlotID }, +} + /// The StackerDB client for communicating with the .signers contract #[derive(Debug)] pub struct StackerDB { @@ -46,32 +53,60 @@ pub struct StackerDB { stacks_private_key: StacksPrivateKey, /// A map of a message ID to last chunk version for each session slot_versions: HashMap>, - /// The signer slot ID -- the index into the signer list for this signer daemon's signing key. - signer_slot_id: SignerSlotID, + /// The running mode of the stackerdb (whether the signer is running in dry-run or + /// normal operation) + mode: StackerDBMode, /// The reward cycle of the connecting signer reward_cycle: u64, } impl From<&SignerConfig> for StackerDB { fn from(config: &SignerConfig) -> Self { + let mode = match config.signer_mode { + SignerConfigMode::DryRun => StackerDBMode::DryRun, + SignerConfigMode::Normal { + ref signer_slot_id, .. + } => StackerDBMode::Normal { + signer_slot_id: *signer_slot_id, + }, + }; + Self::new( &config.node_host, config.stacks_private_key, config.mainnet, config.reward_cycle, - config.signer_slot_id, + mode, ) } } impl StackerDB { - /// Create a new StackerDB client - pub fn new( + #[cfg(any(test, feature = "testing"))] + /// Create a StackerDB client in normal operation (i.e., not a dry-run signer) + pub fn new_normal( host: &str, stacks_private_key: StacksPrivateKey, is_mainnet: bool, reward_cycle: u64, signer_slot_id: SignerSlotID, + ) -> Self { + Self::new( + host, + stacks_private_key, + is_mainnet, + reward_cycle, + StackerDBMode::Normal { signer_slot_id }, + ) + } + + /// Create a new StackerDB client + fn new( + host: &str, + stacks_private_key: StacksPrivateKey, + is_mainnet: bool, + reward_cycle: u64, + signer_mode: StackerDBMode, ) -> Self { let mut signers_message_stackerdb_sessions = HashMap::new(); for msg_id in M::all() { @@ -84,7 +119,7 @@ impl StackerDB { 
signers_message_stackerdb_sessions, stacks_private_key, slot_versions: HashMap::new(), - signer_slot_id, + mode: signer_mode, reward_cycle, } } @@ -110,18 +145,33 @@ impl StackerDB { msg_id: &M, message_bytes: Vec, ) -> Result { - let slot_id = self.signer_slot_id; + let StackerDBMode::Normal { + signer_slot_id: slot_id, + } = &self.mode + else { + info!( + "Dry-run signer would have sent a stackerdb message"; + "message_id" => ?msg_id, + "message_bytes" => to_hex(&message_bytes) + ); + return Ok(StackerDBChunkAckData { + accepted: true, + reason: None, + metadata: None, + code: None, + }); + }; loop { let mut slot_version = if let Some(versions) = self.slot_versions.get_mut(msg_id) { - if let Some(version) = versions.get(&slot_id) { + if let Some(version) = versions.get(slot_id) { *version } else { - versions.insert(slot_id, 0); + versions.insert(*slot_id, 0); 1 } } else { let mut versions = HashMap::new(); - versions.insert(slot_id, 0); + versions.insert(*slot_id, 0); self.slot_versions.insert(*msg_id, versions); 1 }; @@ -143,7 +193,7 @@ impl StackerDB { if let Some(versions) = self.slot_versions.get_mut(msg_id) { // NOTE: per the above, this is always executed - versions.insert(slot_id, slot_version.saturating_add(1)); + versions.insert(*slot_id, slot_version.saturating_add(1)); } else { return Err(ClientError::NotConnected); } @@ -165,7 +215,7 @@ impl StackerDB { } if let Some(versions) = self.slot_versions.get_mut(msg_id) { // NOTE: per the above, this is always executed - versions.insert(slot_id, slot_version.saturating_add(1)); + versions.insert(*slot_id, slot_version.saturating_add(1)); } else { return Err(ClientError::NotConnected); } @@ -216,11 +266,6 @@ impl StackerDB { u32::try_from(self.reward_cycle % 2).expect("FATAL: reward cycle % 2 exceeds u32::MAX") } - /// Retrieve the signer slot ID - pub fn get_signer_slot_id(&self) -> SignerSlotID { - self.signer_slot_id - } - /// Get the session corresponding to the given message ID if it exists pub fn 
get_session_mut(&mut self, msg_id: &M) -> Option<&mut StackerDBSession> { self.signers_message_stackerdb_sessions.get_mut(msg_id) diff --git a/stacks-signer/src/config.rs b/stacks-signer/src/config.rs index a50ca7ecf8..29ee35c961 100644 --- a/stacks-signer/src/config.rs +++ b/stacks-signer/src/config.rs @@ -39,6 +39,7 @@ const BLOCK_PROPOSAL_TIMEOUT_MS: u64 = 600_000; const BLOCK_PROPOSAL_VALIDATION_TIMEOUT_MS: u64 = 120_000; const DEFAULT_FIRST_PROPOSAL_BURN_BLOCK_TIMING_SECS: u64 = 60; const DEFAULT_TENURE_LAST_BLOCK_PROPOSAL_TIMEOUT_SECS: u64 = 30; +const DEFAULT_DRY_RUN: bool = false; const TENURE_IDLE_TIMEOUT_SECS: u64 = 120; #[derive(thiserror::Error, Debug)] @@ -106,15 +107,36 @@ impl Network { } } +/// Signer config mode (whether dry-run or real) +#[derive(Debug, Clone)] +pub enum SignerConfigMode { + /// Dry run operation: signer is not actually registered, the signer + /// will not submit stackerdb messages, etc. + DryRun, + /// Normal signer operation: if registered, the signer will submit + /// stackerdb messages, etc. + Normal { + /// The signer ID assigned to this signer (may be different from signer_slot_id) + signer_id: u32, + /// The signer stackerdb slot id (may be different from signer_id) + signer_slot_id: SignerSlotID, + }, +} + +impl std::fmt::Display for SignerConfigMode { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + SignerConfigMode::DryRun => write!(f, "Dry-Run signer"), + SignerConfigMode::Normal { signer_id, .. 
} => write!(f, "signer #{signer_id}"), + } + } +} + /// The Configuration info needed for an individual signer per reward cycle #[derive(Debug, Clone)] pub struct SignerConfig { /// The reward cycle of the configuration pub reward_cycle: u64, - /// The signer ID assigned to this signer (may be different from signer_slot_id) - pub signer_id: u32, - /// The signer stackerdb slot id (may be different from signer_id) - pub signer_slot_id: SignerSlotID, /// The registered signers for this reward cycle pub signer_entries: SignerEntries, /// The signer slot ids of all signers registered for this reward cycle @@ -141,6 +163,8 @@ pub struct SignerConfig { pub tenure_idle_timeout: Duration, /// The maximum age of a block proposal in seconds that will be processed by the signer pub block_proposal_max_age_secs: u64, + /// The running mode for the signer (dry-run or normal) + pub signer_mode: SignerConfigMode, } /// The parsed configuration for the signer @@ -181,6 +205,8 @@ pub struct GlobalConfig { pub tenure_idle_timeout: Duration, /// The maximum age of a block proposal that will be processed by the signer pub block_proposal_max_age_secs: u64, + /// Is this signer binary going to be running in dry-run mode? + pub dry_run: bool, } /// Internal struct for loading up the config file @@ -220,6 +246,8 @@ struct RawConfigFile { pub tenure_idle_timeout_secs: Option, /// The maximum age of a block proposal (in secs) that will be processed by the signer. pub block_proposal_max_age_secs: Option, + /// Is this signer binary going to be running in dry-run mode? 
+ pub dry_run: Option, } impl RawConfigFile { @@ -321,6 +349,8 @@ impl TryFrom for GlobalConfig { .block_proposal_max_age_secs .unwrap_or(DEFAULT_BLOCK_PROPOSAL_MAX_AGE_SECS); + let dry_run = raw_data.dry_run.unwrap_or(DEFAULT_DRY_RUN); + Ok(Self { node_host: raw_data.node_host, endpoint, @@ -338,6 +368,7 @@ impl TryFrom for GlobalConfig { block_proposal_validation_timeout, tenure_idle_timeout, block_proposal_max_age_secs, + dry_run, }) } } diff --git a/stacks-signer/src/runloop.rs b/stacks-signer/src/runloop.rs index 84c1c592f5..96223b39a0 100644 --- a/stacks-signer/src/runloop.rs +++ b/stacks-signer/src/runloop.rs @@ -25,7 +25,7 @@ use stacks_common::{debug, error, info, warn}; use crate::chainstate::SortitionsView; use crate::client::{retry_with_exponential_backoff, ClientError, StacksClient}; -use crate::config::{GlobalConfig, SignerConfig}; +use crate::config::{GlobalConfig, SignerConfig, SignerConfigMode}; #[cfg(any(test, feature = "testing"))] use crate::v0::tests::TEST_SKIP_SIGNER_CLEANUP; use crate::Signer as SignerTrait; @@ -39,6 +39,9 @@ pub enum ConfigurationError { /// The stackerdb signer config is not yet updated #[error("The stackerdb config is not yet updated")] StackerDBNotUpdated, + /// The signer binary is configured as dry-run, but is also registered for this cycle + #[error("The signer binary is configured as dry-run, but is also registered for this cycle")] + DryRunStackerIsRegistered, } /// The internal signer state info @@ -258,27 +261,48 @@ impl, T: StacksMessageCodec + Clone + Send + Debug> RunLo warn!("Error while fetching stackerdb slots {reward_cycle}: {e:?}"); e })?; + + let dry_run = self.config.dry_run; let current_addr = self.stacks_client.get_signer_address(); - let Some(signer_slot_id) = signer_slot_ids.get(current_addr) else { - warn!( + let signer_config_mode = if !dry_run { + let Some(signer_slot_id) = signer_slot_ids.get(current_addr) else { + warn!( "Signer {current_addr} was not found in stacker db. 
Must not be registered for this reward cycle {reward_cycle}." ); - return Ok(None); - }; - let Some(signer_id) = signer_entries.signer_addr_to_id.get(current_addr) else { - warn!( - "Signer {current_addr} was found in stacker db but not the reward set for reward cycle {reward_cycle}." + return Ok(None); + }; + let Some(signer_id) = signer_entries.signer_addr_to_id.get(current_addr) else { + warn!( + "Signer {current_addr} was found in stacker db but not the reward set for reward cycle {reward_cycle}." + ); + return Ok(None); + }; + info!( + "Signer #{signer_id} ({current_addr}) is registered for reward cycle {reward_cycle}." ); - return Ok(None); + SignerConfigMode::Normal { + signer_slot_id: *signer_slot_id, + signer_id: *signer_id, + } + } else { + if signer_slot_ids.contains_key(current_addr) { + error!( + "Signer is configured for dry-run, but the signer address {current_addr} was found in stacker db." + ); + return Err(ConfigurationError::DryRunStackerIsRegistered); + }; + if signer_entries.signer_addr_to_id.contains_key(current_addr) { + warn!( + "Signer {current_addr} was found in stacker db but not the reward set for reward cycle {reward_cycle}." + ); + return Ok(None); + }; + SignerConfigMode::DryRun }; - info!( - "Signer #{signer_id} ({current_addr}) is registered for reward cycle {reward_cycle}." 
- ); Ok(Some(SignerConfig { reward_cycle, - signer_id: *signer_id, - signer_slot_id: *signer_slot_id, + signer_mode: signer_config_mode, signer_entries, signer_slot_ids: signer_slot_ids.into_values().collect(), first_proposal_burn_block_timing: self.config.first_proposal_burn_block_timing, @@ -299,9 +323,9 @@ impl, T: StacksMessageCodec + Clone + Send + Debug> RunLo let reward_index = reward_cycle % 2; let new_signer_config = match self.get_signer_config(reward_cycle) { Ok(Some(new_signer_config)) => { - let signer_id = new_signer_config.signer_id; + let signer_mode = new_signer_config.signer_mode.clone(); let new_signer = Signer::new(new_signer_config); - info!("{new_signer} Signer is registered for reward cycle {reward_cycle} as signer #{signer_id}. Initialized signer state."); + info!("{new_signer} Signer is registered for reward cycle {reward_cycle} as {signer_mode}. Initialized signer state."); ConfiguredSigner::RegisteredSigner(new_signer) } Ok(None) => { diff --git a/stacks-signer/src/v0/signer.rs b/stacks-signer/src/v0/signer.rs index 70253f8258..4cabbe7da1 100644 --- a/stacks-signer/src/v0/signer.rs +++ b/stacks-signer/src/v0/signer.rs @@ -39,11 +39,25 @@ use stacks_common::{debug, error, info, warn}; use crate::chainstate::{ProposalEvalConfig, SortitionsView}; use crate::client::{ClientError, SignerSlotID, StackerDB, StacksClient}; -use crate::config::SignerConfig; +use crate::config::{SignerConfig, SignerConfigMode}; use crate::runloop::SignerResult; use crate::signerdb::{BlockInfo, BlockState, SignerDb}; use crate::Signer as SignerTrait; +/// Signer running mode (whether dry-run or real) +#[derive(Debug)] +pub enum SignerMode { + /// Dry run operation: signer is not actually registered, the signer + /// will not submit stackerdb messages, etc. + DryRun, + /// Normal signer operation: if registered, the signer will submit + /// stackerdb messages, etc. 
+ Normal { + /// The signer ID assigned to this signer (may be different from signer_slot_id) + signer_id: u32, + }, +} + /// The stacks signer registered for the reward cycle #[derive(Debug)] pub struct Signer { @@ -57,8 +71,8 @@ pub struct Signer { pub stackerdb: StackerDB, /// Whether the signer is a mainnet signer or not pub mainnet: bool, - /// The signer id - pub signer_id: u32, + /// The running mode of the signer (whether dry-run or normal) + pub mode: SignerMode, /// The signer slot ids for the signers in the reward cycle pub signer_slot_ids: Vec, /// The addresses of other signers @@ -80,9 +94,18 @@ pub struct Signer { pub block_proposal_max_age_secs: u64, } +impl std::fmt::Display for SignerMode { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + SignerMode::DryRun => write!(f, "Dry-Run signer"), + SignerMode::Normal { signer_id } => write!(f, "Signer #{signer_id}"), + } + } +} + impl std::fmt::Display for Signer { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "Cycle #{} Signer #{}", self.reward_cycle, self.signer_id,) + write!(f, "Cycle #{} {}", self.reward_cycle, self.mode) } } @@ -275,10 +298,13 @@ impl SignerTrait for Signer { impl From for Signer { fn from(signer_config: SignerConfig) -> Self { let stackerdb = StackerDB::from(&signer_config); - debug!( - "Reward cycle #{} Signer #{}", - signer_config.reward_cycle, signer_config.signer_id, - ); + let mode = match signer_config.signer_mode { + SignerConfigMode::DryRun => SignerMode::DryRun, + SignerConfigMode::Normal { signer_id, .. 
} => SignerMode::Normal { signer_id }, + }; + + debug!("Reward cycle #{} {mode}", signer_config.reward_cycle); + let signer_db = SignerDb::new(&signer_config.db_path).expect("Failed to connect to signer Db"); let proposal_config = ProposalEvalConfig::from(&signer_config); @@ -287,7 +313,7 @@ impl From for Signer { private_key: signer_config.stacks_private_key, stackerdb, mainnet: signer_config.mainnet, - signer_id: signer_config.signer_id, + mode, signer_addresses: signer_config.signer_entries.signer_addresses.clone(), signer_weights: signer_config.signer_entries.signer_addr_to_weight.clone(), signer_slot_ids: signer_config.signer_slot_ids.clone(), @@ -818,31 +844,32 @@ impl Signer { .remove_pending_block_validation(&signer_sig_hash) .unwrap_or_else(|e| warn!("{self}: Failed to remove pending block validation: {e:?}")); - let Some(response) = block_response else { - return; - }; - // Submit a proposal response to the .signers contract for miners - info!( - "{self}: Broadcasting a block response to stacks node: {response:?}"; - ); - let accepted = matches!(response, BlockResponse::Accepted(..)); - match self - .stackerdb - .send_message_with_retry::(response.into()) - { - Ok(_) => { - crate::monitoring::actions::increment_block_responses_sent(accepted); - if let Ok(Some(block_info)) = self - .signer_db - .block_lookup(&block_validate_response.signer_signature_hash()) - { - crate::monitoring::actions::record_block_response_latency(&block_info.block); + if let Some(response) = block_response { + // Submit a proposal response to the .signers contract for miners + info!( + "{self}: Broadcasting a block response to stacks node: {response:?}"; + ); + let accepted = matches!(response, BlockResponse::Accepted(..)); + match self + .stackerdb + .send_message_with_retry::(response.into()) + { + Ok(_) => { + crate::monitoring::actions::increment_block_responses_sent(accepted); + if let Ok(Some(block_info)) = self + .signer_db + 
.block_lookup(&block_validate_response.signer_signature_hash()) + { + crate::monitoring::actions::record_block_response_latency( + &block_info.block, + ); + } + } + Err(e) => { + warn!("{self}: Failed to send block rejection to stacker-db: {e:?}",); } } - Err(e) => { - warn!("{self}: Failed to send block rejection to stacker-db: {e:?}",); - } - } + }; // Check if there is a pending block validation that we need to submit to the node match self.signer_db.get_and_remove_pending_block_validation() { diff --git a/stackslib/src/blockstack_cli.rs b/stackslib/src/blockstack_cli.rs index 13bf8596cc..b46daa4826 100644 --- a/stackslib/src/blockstack_cli.rs +++ b/stackslib/src/blockstack_cli.rs @@ -48,7 +48,6 @@ use clarity::vm::{ClarityName, ClarityVersion, ContractName, Value}; use stacks_common::address::{b58, AddressHashMode}; use stacks_common::codec::{Error as CodecError, StacksMessageCodec}; use stacks_common::types::chainstate::StacksAddress; -use stacks_common::util::cargo_workspace; use stacks_common::util::hash::{hex_bytes, to_hex}; use stacks_common::util::retry::LogReader; @@ -897,6 +896,8 @@ fn main_handler(mut argv: Vec) -> Result { #[cfg(test)] mod test { + use stacks_common::util::cargo_workspace; + use super::*; #[test] diff --git a/stackslib/src/burnchains/bitcoin/address.rs b/stackslib/src/burnchains/bitcoin/address.rs index ae63aa98c3..97b9dc67a8 100644 --- a/stackslib/src/burnchains/bitcoin/address.rs +++ b/stackslib/src/burnchains/bitcoin/address.rs @@ -302,9 +302,8 @@ impl SegwitBitcoinAddress { pub fn from_bech32(s: &str) -> Option { let (hrp, quintets, variant) = bech32::decode(s) - .map_err(|e| { - test_debug!("Failed to decode '{}': {:?}", s, &e); - e + .inspect_err(|_e| { + test_debug!("Failed to decode '{s}': {_e:?}"); }) .ok()?; @@ -327,9 +326,8 @@ impl SegwitBitcoinAddress { prog.append(&mut quintets[1..].to_vec()); let bytes = Vec::from_base32(&prog) - .map_err(|e| { - test_debug!("Failed to decode quintets: {:?}", &e); - e + .inspect_err(|_e| 
{ + test_debug!("Failed to decode quintets: {_e:?}"); }) .ok()?; diff --git a/stackslib/src/burnchains/bitcoin/bits.rs b/stackslib/src/burnchains/bitcoin/bits.rs index 884f07a171..2a9745af25 100644 --- a/stackslib/src/burnchains/bitcoin/bits.rs +++ b/stackslib/src/burnchains/bitcoin/bits.rs @@ -112,22 +112,15 @@ impl BitcoinTxInputStructured { Instruction::PushBytes(payload) => payload, _ => { // not pushbytes, so this can't be a multisig script - test_debug!( - "Not a multisig script: Instruction {} is not a PushBytes", - i - ); + test_debug!("Not a multisig script: Instruction {i} is not a PushBytes"); return None; } }; let pubk = BitcoinPublicKey::from_slice(payload) - .map_err(|e| { + .inspect_err(|&e| { // not a public key - warn!( - "Not a multisig script: pushbytes {} is not a public key ({:?})", - i, e - ); - e + warn!("Not a multisig script: pushbytes {i} is not a public key ({e:?})"); }) .ok()?; @@ -169,13 +162,9 @@ impl BitcoinTxInputStructured { for i in 0..pubkey_vecs.len() { let payload = &pubkey_vecs[i]; let pubk = BitcoinPublicKey::from_slice(&payload[..]) - .map_err(|e| { + .inspect_err(|&e| { // not a public key - warn!( - "Not a multisig script: item {} is not a public key ({:?})", - i, e - ); - e + warn!("Not a multisig script: item {i} is not a public key ({e:?})"); }) .ok()?; diff --git a/stackslib/src/burnchains/bitcoin/indexer.rs b/stackslib/src/burnchains/bitcoin/indexer.rs index fe0740e566..8f04d08a66 100644 --- a/stackslib/src/burnchains/bitcoin/indexer.rs +++ b/stackslib/src/burnchains/bitcoin/indexer.rs @@ -265,40 +265,31 @@ impl BitcoinIndexer { Ok(s) => { // Disable Nagle algorithm s.set_nodelay(true).map_err(|_e| { - test_debug!("Failed to set TCP_NODELAY: {:?}", &_e); + test_debug!("Failed to set TCP_NODELAY: {_e:?}"); btc_error::ConnectionError })?; // set timeout s.set_read_timeout(Some(Duration::from_secs(self.runtime.timeout))) .map_err(|_e| { - test_debug!("Failed to set TCP read timeout: {:?}", &_e); + test_debug!("Failed to 
set TCP read timeout: {_e:?}"); btc_error::ConnectionError })?; s.set_write_timeout(Some(Duration::from_secs(self.runtime.timeout))) .map_err(|_e| { - test_debug!("Failed to set TCP write timeout: {:?}", &_e); + test_debug!("Failed to set TCP write timeout: {_e:?}"); btc_error::ConnectionError })?; - match self.runtime.sock.take() { - Some(s) => { - let _ = s.shutdown(Shutdown::Both); - } - None => {} + if let Some(s_old) = self.runtime.sock.replace(s) { + let _ = s_old.shutdown(Shutdown::Both); } - - self.runtime.sock = Some(s); Ok(()) } Err(_e) => { - let s = self.runtime.sock.take(); - match s { - Some(s) => { - let _ = s.shutdown(Shutdown::Both); - } - None => {} + if let Some(s) = self.runtime.sock.take() { + let _ = s.shutdown(Shutdown::Both); } Err(btc_error::ConnectionError) } @@ -627,12 +618,8 @@ impl BitcoinIndexer { )?; // what's the last header we have from the canonical history? - let canonical_end_block = orig_spv_client.get_headers_height().map_err(|e| { - error!( - "Failed to get the last block from {}", - canonical_headers_path - ); - e + let canonical_end_block = orig_spv_client.get_headers_height().inspect_err(|_e| { + error!("Failed to get the last block from {canonical_headers_path}"); })?; // bootstrap reorg client @@ -694,13 +681,12 @@ impl BitcoinIndexer { let reorg_headers = reorg_spv_client .read_block_headers(start_block, start_block + REORG_BATCH_SIZE) - .map_err(|e| { + .inspect_err(|_e| { error!( "Failed to read reorg Bitcoin headers from {} to {}", start_block, start_block + REORG_BATCH_SIZE ); - e })?; if reorg_headers.is_empty() { @@ -724,13 +710,12 @@ impl BitcoinIndexer { // got reorg headers. 
Find the equivalent headers in our canonical history let canonical_headers = orig_spv_client .read_block_headers(start_block, start_block + REORG_BATCH_SIZE) - .map_err(|e| { + .inspect_err(|_e| { error!( "Failed to read canonical headers from {} to {}", start_block, start_block + REORG_BATCH_SIZE ); - e })?; assert!( @@ -932,11 +917,8 @@ impl BitcoinIndexer { impl Drop for BitcoinIndexer { fn drop(&mut self) { - match self.runtime.sock { - Some(ref mut s) => { - let _ = s.shutdown(Shutdown::Both); - } - None => {} + if let Some(ref mut s) = self.runtime.sock { + let _ = s.shutdown(Shutdown::Both); } } } diff --git a/stackslib/src/burnchains/bitcoin/spv.rs b/stackslib/src/burnchains/bitcoin/spv.rs index fff8eaa06f..d12b261be9 100644 --- a/stackslib/src/burnchains/bitcoin/spv.rs +++ b/stackslib/src/burnchains/bitcoin/spv.rs @@ -832,10 +832,7 @@ impl SpvClient { // fetching headers in ascending order, so verify that the first item in // `block_headers` connects to a parent in the DB (if it has one) self.insert_block_headers_after(insert_height, block_headers) - .map_err(|e| { - error!("Failed to insert block headers: {:?}", &e); - e - })?; + .inspect_err(|e| error!("Failed to insert block headers: {e:?}"))?; // check work let chain_tip = self.get_headers_height()?; @@ -843,22 +840,15 @@ impl SpvClient { (insert_height.saturating_sub(1)) / BLOCK_DIFFICULTY_CHUNK_SIZE, chain_tip / BLOCK_DIFFICULTY_CHUNK_SIZE + 1, ) - .map_err(|e| { - error!( - "Received headers with bad target, difficulty, or continuity: {:?}", - &e - ); - e + .inspect_err(|e| { + error!("Received headers with bad target, difficulty, or continuity: {e:?}") })?; } else { // fetching headers in descending order, so verify that the last item in // `block_headers` connects to a child in the DB (if it has one) let headers_len = block_headers.len() as u64; self.insert_block_headers_before(insert_height, block_headers) - .map_err(|e| { - error!("Failed to insert block headers: {:?}", &e); - e - })?; + 
.inspect_err(|e| error!("Failed to insert block headers: {e:?}"))?; // check work let interval_start = if insert_height % BLOCK_DIFFICULTY_CHUNK_SIZE == 0 { @@ -870,12 +860,8 @@ impl SpvClient { let interval_end = (insert_height + 1 + headers_len) / BLOCK_DIFFICULTY_CHUNK_SIZE + 1; self.validate_header_work(interval_start, interval_end) - .map_err(|e| { - error!( - "Received headers with bad target, difficulty, or continuity: {:?}", - &e - ); - e + .inspect_err(|e| { + error!("Received headers with bad target, difficulty, or continuity: {e:?}") })?; } @@ -883,16 +869,12 @@ impl SpvClient { let total_work_after = self.update_chain_work()?; if total_work_after < total_work_before { error!( - "New headers represent less work than the old headers ({} < {})", - total_work_before, total_work_after + "New headers represent less work than the old headers ({total_work_before} < {total_work_after})" ); return Err(btc_error::InvalidChainWork); } - debug!( - "Handled {} Headers: {}-{}", - num_headers, first_header_hash, last_header_hash - ); + debug!("Handled {num_headers} Headers: {first_header_hash}-{last_header_hash}"); } else { debug!("Handled empty header reply"); } @@ -956,22 +938,16 @@ impl SpvClient { ); SpvClient::validate_header_integrity(start_height, &block_headers, self.check_txcount) - .map_err(|e| { - error!("Received invalid headers: {:?}", &e); - e - })?; - - let parent_header = match self.read_block_header(start_height)? { - Some(header) => header, - None => { - warn!( - "No header for block {} -- cannot insert {} headers into {}", - start_height, - block_headers.len(), - self.headers_path - ); - return Err(btc_error::NoncontiguousHeader); - } + .inspect_err(|e| error!("Received invalid headers: {e:?}"))?; + + let Some(parent_header) = self.read_block_header(start_height)? 
else { + warn!( + "No header for block {} -- cannot insert {} headers into {}", + start_height, + block_headers.len(), + self.headers_path + ); + return Err(btc_error::NoncontiguousHeader); }; // contiguous? @@ -1010,10 +986,7 @@ impl SpvClient { ); SpvClient::validate_header_integrity(start_height, &block_headers, self.check_txcount) - .map_err(|e| { - error!("Received invalid headers: {:?}", &e); - e - })?; + .inspect_err(|e| error!("Received invalid headers: {e:?}"))?; match self.read_block_header(end_height)? { Some(child_header) => { @@ -1028,10 +1001,7 @@ impl SpvClient { None => { // if we're inserting headers in reverse order, we're not guaranteed to have the // child. - debug!( - "No header for child block {}, so will not validate continuity", - end_height - ); + debug!("No header for child block {end_height}, so will not validate continuity"); } } diff --git a/stackslib/src/burnchains/burnchain.rs b/stackslib/src/burnchains/burnchain.rs index 18fb27e27e..8bc7289ec2 100644 --- a/stackslib/src/burnchains/burnchain.rs +++ b/stackslib/src/burnchains/burnchain.rs @@ -683,11 +683,12 @@ impl Burnchain { if headers_height == 0 || headers_height < self.first_block_height { debug!("Fetch initial headers"); - indexer.sync_headers(headers_height, None).map_err(|e| { - error!("Failed to sync initial headers"); - sleep_ms(100); - e - })?; + indexer + .sync_headers(headers_height, None) + .inspect_err(|_e| { + error!("Failed to sync initial headers"); + sleep_ms(100); + })?; } Ok(()) } @@ -1137,13 +1138,9 @@ impl Burnchain { let headers_path = indexer.get_headers_path(); // sanity check -- what is the height of our highest header - let headers_height = indexer.get_highest_header_height().map_err(|e| { - error!( - "Failed to read headers height from {}: {:?}", - headers_path, &e - ); - e - })?; + let headers_height = indexer + .get_highest_header_height() + .inspect_err(|e| error!("Failed to read headers height from {headers_path}: {e:?}"))?; if headers_height == 0 { 
return Ok((0, false)); @@ -1152,16 +1149,12 @@ impl Burnchain { // did we encounter a reorg since last sync? Find the highest common ancestor of the // remote bitcoin peer's chain state. // Note that this value is 0-indexed -- the smallest possible value it returns is 0. - let reorg_height = indexer.find_chain_reorg().map_err(|e| { - error!("Failed to check for reorgs from {}: {:?}", headers_path, &e); - e - })?; + let reorg_height = indexer + .find_chain_reorg() + .inspect_err(|e| error!("Failed to check for reorgs from {headers_path}: {e:?}"))?; if reorg_height < headers_height { - warn!( - "Burnchain reorg detected: highest common ancestor at height {}", - reorg_height - ); + warn!("Burnchain reorg detected: highest common ancestor at height {reorg_height}"); return Ok((reorg_height, true)); } else { // no reorg diff --git a/stackslib/src/burnchains/db.rs b/stackslib/src/burnchains/db.rs index cccb6874e9..f8343ae898 100644 --- a/stackslib/src/burnchains/db.rs +++ b/stackslib/src/burnchains/db.rs @@ -1193,9 +1193,10 @@ impl BurnchainDB { let ops: Vec = query_rows(&self.conn, qry, args).expect("FATAL: burnchain DB query error"); for op in ops { - if let Some(_) = indexer + if indexer .find_burnchain_header_height(&op.burn_header_hash()) .expect("FATAL: burnchain DB query error") + .is_some() { // this is the op on the canonical fork return Some(op); diff --git a/stackslib/src/burnchains/tests/burnchain.rs b/stackslib/src/burnchains/tests/burnchain.rs index 7cd80c7a38..38fec5fee3 100644 --- a/stackslib/src/burnchains/tests/burnchain.rs +++ b/stackslib/src/burnchains/tests/burnchain.rs @@ -694,19 +694,14 @@ fn test_burn_snapshot_sequence() { initial_reward_start_block: first_block_height, }; - let mut leader_private_keys = vec![]; let mut leader_public_keys = vec![]; let mut leader_bitcoin_public_keys = vec![]; - let mut leader_bitcoin_addresses = vec![]; for i in 0..32 { let mut csprng: ThreadRng = thread_rng(); let vrf_privkey = 
VRFPrivateKey(ed25519_dalek::SigningKey::generate(&mut csprng)); let vrf_pubkey = VRFPublicKey::from_private(&vrf_privkey); - let privkey_hex = vrf_privkey.to_hex(); - leader_private_keys.push(privkey_hex); - let pubkey_hex = vrf_pubkey.to_hex(); leader_public_keys.push(pubkey_hex); @@ -714,12 +709,6 @@ fn test_burn_snapshot_sequence() { let bitcoin_publickey = BitcoinPublicKey::from_private(&bitcoin_privkey); leader_bitcoin_public_keys.push(to_hex(&bitcoin_publickey.to_bytes())); - - leader_bitcoin_addresses.push(BitcoinAddress::from_bytes_legacy( - BitcoinNetworkType::Testnet, - LegacyBitcoinAddressType::PublicKeyHash, - &Hash160::from_data(&bitcoin_publickey.to_bytes()).0, - )); } let mut expected_burn_total: u64 = 0; @@ -728,7 +717,6 @@ fn test_burn_snapshot_sequence() { let mut db = SortitionDB::connect_test(first_block_height, &first_burn_hash).unwrap(); let mut prev_snapshot = BlockSnapshot::initial(first_block_height, &first_burn_hash, first_block_height); - let mut all_stacks_block_hashes = vec![]; for i in 0..32 { let mut block_ops = vec![]; @@ -819,7 +807,6 @@ fn test_burn_snapshot_sequence() { burn_header_hash: burn_block_hash.clone(), }; - all_stacks_block_hashes.push(next_block_commit.block_header_hash.clone()); block_ops.push(BlockstackOperationType::LeaderBlockCommit( next_block_commit, )); diff --git a/stackslib/src/burnchains/tests/db.rs b/stackslib/src/burnchains/tests/db.rs index 740fb69a41..d742a1caf5 100644 --- a/stackslib/src/burnchains/tests/db.rs +++ b/stackslib/src/burnchains/tests/db.rs @@ -915,8 +915,6 @@ fn test_update_block_descendancy_with_fork() { let mut cmts_genesis = vec![]; let mut cmts_invalid = vec![]; - let mut fork_parent = None; - let mut fork_parent_block_header: Option = None; let mut fork_cmts = vec![]; for i in 0..5 { @@ -950,7 +948,6 @@ fn test_update_block_descendancy_with_fork() { }; fork_headers.push(block_header.clone()); - fork_parent_block_header = Some(block_header); } let mut am_id = 0; @@ -1014,7 +1011,6 @@ fn 
test_update_block_descendancy_with_fork() { fork_cmts.push(fork_cmt.clone()); parent = Some(cmt); - fork_parent = Some(fork_cmt); if i == 0 { am_id = { diff --git a/stackslib/src/burnchains/tests/mod.rs b/stackslib/src/burnchains/tests/mod.rs index 5af5848ed2..2b50656df6 100644 --- a/stackslib/src/burnchains/tests/mod.rs +++ b/stackslib/src/burnchains/tests/mod.rs @@ -577,13 +577,10 @@ impl TestBurnchainBlock { pub fn patch_from_chain_tip(&mut self, parent_snapshot: &BlockSnapshot) { assert_eq!(parent_snapshot.block_height + 1, self.block_height); - for i in 0..self.txs.len() { - match self.txs[i] { - BlockstackOperationType::LeaderKeyRegister(ref mut data) => { - assert_eq!(data.block_height, self.block_height); - data.consensus_hash = parent_snapshot.consensus_hash.clone(); - } - _ => {} + for tx in self.txs.iter_mut() { + if let BlockstackOperationType::LeaderKeyRegister(ref mut data) = tx { + assert_eq!(data.block_height, self.block_height); + data.consensus_hash = parent_snapshot.consensus_hash.clone(); } } } diff --git a/stackslib/src/chainstate/burn/db/sortdb.rs b/stackslib/src/chainstate/burn/db/sortdb.rs index 791ab19006..2fb6c1ca86 100644 --- a/stackslib/src/chainstate/burn/db/sortdb.rs +++ b/stackslib/src/chainstate/burn/db/sortdb.rs @@ -4896,16 +4896,12 @@ impl SortitionDB { let qry = "SELECT * FROM snapshots WHERE sortition_id = ?1"; let args = [&sortition_id]; query_row_panic(conn, qry, &args, || { - format!( - "FATAL: multiple block snapshots for the same block {}", - sortition_id - ) + format!("FATAL: multiple block snapshots for the same block {sortition_id}") }) - .map(|x| { + .inspect(|x| { if x.is_none() { - test_debug!("No snapshot with sortition ID {}", sortition_id); + test_debug!("No snapshot with sortition ID {sortition_id}"); } - x }) } diff --git a/stackslib/src/chainstate/burn/operations/leader_block_commit.rs b/stackslib/src/chainstate/burn/operations/leader_block_commit.rs index 33f8dd3af0..cc8fd0a225 100644 --- 
a/stackslib/src/chainstate/burn/operations/leader_block_commit.rs +++ b/stackslib/src/chainstate/burn/operations/leader_block_commit.rs @@ -1131,19 +1131,17 @@ impl LeaderBlockCommitOp { .is_after_pox_sunset_end(self.block_height, epoch.epoch_id) { // sunset has begun and we're not in epoch 2.1 or later, so apply sunset check - self.check_after_pox_sunset().map_err(|e| { - warn!("Invalid block-commit: bad PoX after sunset: {:?}", &e; + self.check_after_pox_sunset().inspect_err(|e| { + warn!("Invalid block-commit: bad PoX after sunset: {e:?}"; "apparent_sender" => %apparent_sender_repr); - e })?; vec![] } else { // either in epoch 2.1, or the PoX sunset hasn't completed yet self.check_pox(epoch.epoch_id, burnchain, tx, reward_set_info) - .map_err(|e| { - warn!("Invalid block-commit: bad PoX: {:?}", &e; + .inspect_err(|e| { + warn!("Invalid block-commit: bad PoX: {e:?}"; "apparent_sender" => %apparent_sender_repr); - e })? }; diff --git a/stackslib/src/chainstate/coordinator/tests.rs b/stackslib/src/chainstate/coordinator/tests.rs index b0ddcba585..8c37b4e511 100644 --- a/stackslib/src/chainstate/coordinator/tests.rs +++ b/stackslib/src/chainstate/coordinator/tests.rs @@ -2253,7 +2253,6 @@ fn test_sortition_with_reward_set() { let mut started_first_reward_cycle = false; // process sequential blocks, and their sortitions... 
let mut stacks_blocks: Vec<(SortitionId, StacksBlock)> = vec![]; - let mut anchor_blocks = vec![]; // split up the vrf keys and committers so that we have some that will be mining "correctly" // and some that will be producing bad outputs @@ -2427,10 +2426,6 @@ fn test_sortition_with_reward_set() { let new_burnchain_tip = burnchain.get_canonical_chain_tip().unwrap(); if b.is_reward_cycle_start(new_burnchain_tip.block_height) { started_first_reward_cycle = true; - // store the anchor block for this sortition for later checking - let ic = sort_db.index_handle_at_tip(); - let bhh = ic.get_last_anchor_block_hash().unwrap().unwrap(); - anchor_blocks.push(bhh); } let tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap(); @@ -2525,7 +2520,6 @@ fn test_sortition_with_burner_reward_set() { let mut started_first_reward_cycle = false; // process sequential blocks, and their sortitions... let mut stacks_blocks: Vec<(SortitionId, StacksBlock)> = vec![]; - let mut anchor_blocks = vec![]; // split up the vrf keys and committers so that we have some that will be mining "correctly" // and some that will be producing bad outputs @@ -2673,10 +2667,6 @@ fn test_sortition_with_burner_reward_set() { let new_burnchain_tip = burnchain.get_canonical_chain_tip().unwrap(); if b.is_reward_cycle_start(new_burnchain_tip.block_height) { started_first_reward_cycle = true; - // store the anchor block for this sortition for later checking - let ic = sort_db.index_handle_at_tip(); - let bhh = ic.get_last_anchor_block_hash().unwrap().unwrap(); - anchor_blocks.push(bhh); } let tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap(); @@ -2789,7 +2779,6 @@ fn test_pox_btc_ops() { let mut started_first_reward_cycle = false; // process sequential blocks, and their sortitions... 
let mut stacks_blocks: Vec<(SortitionId, StacksBlock)> = vec![]; - let mut anchor_blocks = vec![]; // track the reward set consumption let mut reward_cycle_count = 0; @@ -2957,10 +2946,6 @@ fn test_pox_btc_ops() { if b.is_reward_cycle_start(new_burnchain_tip.block_height) { if new_burnchain_tip.block_height < sunset_ht { started_first_reward_cycle = true; - // store the anchor block for this sortition for later checking - let ic = sort_db.index_handle_at_tip(); - let bhh = ic.get_last_anchor_block_hash().unwrap().unwrap(); - anchor_blocks.push(bhh); } else { // store the anchor block for this sortition for later checking let ic = sort_db.index_handle_at_tip(); @@ -3081,7 +3066,6 @@ fn test_stx_transfer_btc_ops() { let mut started_first_reward_cycle = false; // process sequential blocks, and their sortitions... let mut stacks_blocks: Vec<(SortitionId, StacksBlock)> = vec![]; - let mut anchor_blocks = vec![]; // track the reward set consumption let mut reward_recipients = HashSet::new(); @@ -3304,10 +3288,6 @@ fn test_stx_transfer_btc_ops() { if b.is_reward_cycle_start(new_burnchain_tip.block_height) { if new_burnchain_tip.block_height < sunset_ht { started_first_reward_cycle = true; - // store the anchor block for this sortition for later checking - let ic = sort_db.index_handle_at_tip(); - let bhh = ic.get_last_anchor_block_hash().unwrap().unwrap(); - anchor_blocks.push(bhh); } else { // store the anchor block for this sortition for later checking let ic = sort_db.index_handle_at_tip(); @@ -5288,7 +5268,6 @@ fn test_sortition_with_sunset() { let mut started_first_reward_cycle = false; // process sequential blocks, and their sortitions... 
let mut stacks_blocks: Vec<(SortitionId, StacksBlock)> = vec![]; - let mut anchor_blocks = vec![]; // split up the vrf keys and committers so that we have some that will be mining "correctly" // and some that will be producing bad outputs @@ -5472,10 +5451,6 @@ fn test_sortition_with_sunset() { if b.is_reward_cycle_start(new_burnchain_tip.block_height) { if new_burnchain_tip.block_height < sunset_ht { started_first_reward_cycle = true; - // store the anchor block for this sortition for later checking - let ic = sort_db.index_handle_at_tip(); - let bhh = ic.get_last_anchor_block_hash().unwrap().unwrap(); - anchor_blocks.push(bhh); } else { // store the anchor block for this sortition for later checking let ic = sort_db.index_handle_at_tip(); @@ -5601,7 +5576,6 @@ fn test_sortition_with_sunset_and_epoch_switch() { let mut started_first_reward_cycle = false; // process sequential blocks, and their sortitions... let mut stacks_blocks: Vec<(SortitionId, StacksBlock)> = vec![]; - let mut anchor_blocks = vec![]; // split up the vrf keys and committers so that we have some that will be mining "correctly" // and some that will be producing bad outputs @@ -5813,10 +5787,6 @@ fn test_sortition_with_sunset_and_epoch_switch() { if b.is_reward_cycle_start(new_burnchain_tip.block_height) { if new_burnchain_tip.block_height < sunset_ht { started_first_reward_cycle = true; - // store the anchor block for this sortition for later checking - let ic = sort_db.index_handle_at_tip(); - let bhh = ic.get_last_anchor_block_hash().unwrap().unwrap(); - anchor_blocks.push(bhh); } else { // store the anchor block for this sortition for later checking let ic = sort_db.index_handle_at_tip(); @@ -6464,7 +6434,6 @@ fn test_pox_fork_out_of_order() { let mut sortition_ids_diverged = false; // process sequential blocks, and their sortitions... 
let mut stacks_blocks: Vec<(SortitionId, StacksBlock)> = vec![]; - let mut anchor_blocks = vec![]; // setup: // 2 forks: 0 - 1 - 2 - 3 - 4 - 5 - 11 - 12 - 13 - 14 - 15 @@ -6545,8 +6514,6 @@ fn test_pox_fork_out_of_order() { .unwrap() .block_height ); - - anchor_blocks.push(bhh); } let tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap(); diff --git a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs index bf67398d14..72ebc8ff07 100644 --- a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs +++ b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs @@ -112,7 +112,6 @@ fn advance_to_nakamoto( let default_pox_addr = PoxAddress::from_legacy(AddressHashMode::SerializeP2PKH, addr.bytes().clone()); - let mut tip = None; for sortition_height in 0..11 { // stack to pox-3 in cycle 7 let txs = if sortition_height == 6 { @@ -156,7 +155,7 @@ fn advance_to_nakamoto( vec![] }; - tip = Some(peer.tenure_with_txs(&txs, &mut peer_nonce)); + peer.tenure_with_txs(&txs, &mut peer_nonce); } // peer is at the start of cycle 8 } @@ -347,9 +346,6 @@ fn replay_reward_cycle( .step_by(reward_cycle_length) .collect(); - let mut indexes: Vec<_> = (0..stacks_blocks.len()).collect(); - indexes.shuffle(&mut thread_rng()); - for burn_ops in burn_ops.iter() { let (_, _, consensus_hash) = peer.next_burnchain_block(burn_ops.clone()); } @@ -842,7 +838,6 @@ fn block_descendant() { boot_plan.pox_constants = pox_constants; let mut peer = boot_plan.boot_into_nakamoto_peer(vec![], None); - let mut blocks = vec![]; let pox_constants = peer.sortdb().pox_constants.clone(); let first_burn_height = peer.sortdb().first_block_height; @@ -851,7 +846,6 @@ fn block_descendant() { loop { let (block, burn_height, ..) 
= peer.single_block_tenure(&private_key, |_| {}, |_| {}, |_| true); - blocks.push(block); if pox_constants.is_in_prepare_phase(first_burn_height, burn_height + 1) { info!("At prepare phase start"; "burn_height" => burn_height); @@ -3196,9 +3190,6 @@ fn test_stacks_on_burnchain_ops() { ); let mut all_blocks: Vec = vec![]; - let mut all_burn_ops = vec![]; - let mut consensus_hashes = vec![]; - let mut fee_counts = vec![]; let stx_miner_key = peer.miner.nakamoto_miner_key(); let mut extra_burn_ops = vec![]; @@ -3395,8 +3386,6 @@ fn test_stacks_on_burnchain_ops() { }) .sum::(); - consensus_hashes.push(consensus_hash); - fee_counts.push(fees); let mut blocks: Vec = blocks_and_sizes .into_iter() .map(|(block, _, _)| block) @@ -3438,7 +3427,6 @@ fn test_stacks_on_burnchain_ops() { ); all_blocks.append(&mut blocks); - all_burn_ops.push(burn_ops); } // check receipts for burn ops diff --git a/stackslib/src/chainstate/nakamoto/miner.rs b/stackslib/src/chainstate/nakamoto/miner.rs index a36e52512d..d9ad1319f7 100644 --- a/stackslib/src/chainstate/nakamoto/miner.rs +++ b/stackslib/src/chainstate/nakamoto/miner.rs @@ -150,6 +150,21 @@ pub struct MinerTenureInfo<'a> { pub tenure_block_commit_opt: Option, } +/// Structure returned from `NakamotoBlockBuilder::build_nakamoto_block` with +/// information about the block that was built. 
+pub struct BlockMetadata { + /// The block that was built + pub block: NakamotoBlock, + /// The execution cost consumed so far by the current tenure + pub tenure_consumed: ExecutionCost, + /// The cost budget for the current tenure + pub tenure_budget: ExecutionCost, + /// The size of the blocks in the current tenure in bytes + pub tenure_size: u64, + /// The events emitted by the transactions included in this block + pub tx_events: Vec, +} + impl NakamotoBlockBuilder { /// Make a block builder from genesis (testing only) pub fn new_first_block( @@ -526,7 +541,7 @@ impl NakamotoBlockBuilder { settings: BlockBuilderSettings, event_observer: Option<&dyn MemPoolEventDispatcher>, signer_bitvec_len: u16, - ) -> Result<(NakamotoBlock, ExecutionCost, u64, Vec), Error> { + ) -> Result { let (tip_consensus_hash, tip_block_hash, tip_height) = ( parent_stacks_header.consensus_hash.clone(), parent_stacks_header.anchored_header.block_hash(), @@ -556,7 +571,7 @@ impl NakamotoBlockBuilder { builder.load_tenure_info(&mut chainstate, burn_dbconn, tenure_info.cause())?; let mut tenure_tx = builder.tenure_begin(burn_dbconn, &mut miner_tenure_info)?; - let block_limit = tenure_tx + let tenure_budget = tenure_tx .block_limit() .expect("Failed to obtain block limit from miner's block connection"); @@ -570,7 +585,7 @@ impl NakamotoBlockBuilder { (1..=100).contains(&percentage), "BUG: tenure_cost_limit_per_block_percentage: {percentage}%. 
Must be between between 1 and 100" ); - let mut remaining_limit = block_limit.clone(); + let mut remaining_limit = tenure_budget.clone(); let cost_so_far = tenure_tx.cost_so_far(); if remaining_limit.sub(&cost_so_far).is_ok() && remaining_limit.divide(100).is_ok() { remaining_limit.multiply(percentage.into()).expect( @@ -581,7 +596,7 @@ impl NakamotoBlockBuilder { "Setting soft limit for clarity cost to {percentage}% of remaining block limit"; "remaining_limit" => %remaining_limit, "cost_so_far" => %cost_so_far, - "block_limit" => %block_limit, + "block_limit" => %tenure_budget, ); soft_limit = Some(remaining_limit); }; @@ -630,13 +645,13 @@ impl NakamotoBlockBuilder { // save the block so we can build microblocks off of it let block = builder.mine_nakamoto_block(&mut tenure_tx); - let size = builder.bytes_so_far; - let consumed = builder.tenure_finish(tenure_tx)?; + let tenure_size = builder.bytes_so_far; + let tenure_consumed = builder.tenure_finish(tenure_tx)?; let ts_end = get_epoch_time_ms(); set_last_mined_block_transaction_count(block.txs.len() as u64); - set_last_mined_execution_cost_observed(&consumed, &block_limit); + set_last_mined_execution_cost_observed(&tenure_consumed, &tenure_budget); info!( "Miner: mined Nakamoto block"; @@ -645,14 +660,20 @@ impl NakamotoBlockBuilder { "height" => block.header.chain_length, "tx_count" => block.txs.len(), "parent_block_id" => %block.header.parent_block_id, - "block_size" => size, - "execution_consumed" => %consumed, - "percent_full" => block_limit.proportion_largest_dimension(&consumed), + "block_size" => tenure_size, + "execution_consumed" => %tenure_consumed, + "percent_full" => tenure_budget.proportion_largest_dimension(&tenure_consumed), "assembly_time_ms" => ts_end.saturating_sub(ts_start), "consensus_hash" => %block.header.consensus_hash ); - Ok((block, consumed, size, tx_events)) + Ok(BlockMetadata { + block, + tenure_consumed, + tenure_budget, + tenure_size, + tx_events, + }) } pub fn 
get_bytes_so_far(&self) -> u64 { diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index ceb9b42d91..056bd53fe4 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -1710,29 +1710,26 @@ impl NakamotoChainState { block_id: &StacksBlockId, ) { loop { - let Ok(staging_block_tx) = stacks_chain_state.staging_db_tx_begin().map_err(|e| { - warn!("Failed to begin staging DB tx: {:?}", &e); - e - }) else { + let Ok(staging_block_tx) = stacks_chain_state + .staging_db_tx_begin() + .inspect_err(|e| warn!("Failed to begin staging DB tx: {e:?}")) + else { sleep_ms(1000); continue; }; - let Ok(_) = staging_block_tx.set_block_processed(block_id).map_err(|e| { - warn!("Failed to mark {} as processed: {:?}", block_id, &e); - e - }) else { + let Ok(_) = staging_block_tx + .set_block_processed(block_id) + .inspect_err(|e| warn!("Failed to mark {block_id} as processed: {e:?}")) + else { sleep_ms(1000); continue; }; - let Ok(_) = staging_block_tx.commit().map_err(|e| { - warn!( - "Failed to commit staging block tx for {}: {:?}", - block_id, &e - ); - e - }) else { + let Ok(_) = staging_block_tx + .commit() + .inspect_err(|e| warn!("Failed to commit staging block tx for {block_id}: {e:?}")) + else { sleep_ms(1000); continue; }; @@ -1748,29 +1745,26 @@ impl NakamotoChainState { block_id: &StacksBlockId, ) { loop { - let Ok(staging_block_tx) = stacks_chain_state.staging_db_tx_begin().map_err(|e| { - warn!("Failed to begin staging DB tx: {:?}", &e); - e - }) else { + let Ok(staging_block_tx) = stacks_chain_state + .staging_db_tx_begin() + .inspect_err(|e| warn!("Failed to begin staging DB tx: {e:?}")) + else { sleep_ms(1000); continue; }; - let Ok(_) = staging_block_tx.set_block_orphaned(block_id).map_err(|e| { - warn!("Failed to mark {} as orphaned: {:?}", &block_id, &e); - e - }) else { + let Ok(_) = staging_block_tx + .set_block_orphaned(block_id) + .inspect_err(|e| warn!("Failed to mark 
{block_id} as orphaned: {e:?}")) + else { sleep_ms(1000); continue; }; - let Ok(_) = staging_block_tx.commit().map_err(|e| { - warn!( - "Failed to commit staging block tx for {}: {:?}", - &block_id, &e - ); - e - }) else { + let Ok(_) = staging_block_tx + .commit() + .inspect_err(|e| warn!("Failed to commit staging block tx for {block_id}: {e:?}")) + else { sleep_ms(1000); continue; }; @@ -2352,12 +2346,11 @@ impl NakamotoChainState { let miner_pubkey_hash160 = leader_key .interpret_nakamoto_signing_key() .ok_or(ChainstateError::NoSuchBlockError) - .map_err(|e| { + .inspect_err(|_e| { warn!( "Leader key did not contain a hash160 of the miner signing public key"; "leader_key" => ?leader_key, ); - e })?; // attaches to burn chain @@ -2476,7 +2469,7 @@ impl NakamotoChainState { ) -> Result { test_debug!("Consider Nakamoto block {}", &block.block_id()); // do nothing if we already have this block - if let Some(_) = Self::get_block_header(headers_conn, &block.header.block_id())? { + if Self::get_block_header(headers_conn, &block.header.block_id())?.is_some() { debug!("Already have block {}", &block.header.block_id()); return Ok(false); } @@ -2959,12 +2952,11 @@ impl NakamotoChainState { warn!("No VRF proof for {}", &parent_sn.consensus_hash); ChainstateError::NoSuchBlockError }) - .map_err(|e| { + .inspect_err(|_e| { warn!("Could not find parent VRF proof"; "tip_block_id" => %tip_block_id, "parent consensus_hash" => %parent_sn.consensus_hash, "block consensus_hash" => %consensus_hash); - e })?; Ok(parent_vrf_proof) @@ -3029,12 +3021,11 @@ impl NakamotoChainState { } let proof = VRFProof::from_hex(&bytes) .ok_or(DBError::Corruption) - .map_err(|e| { + .inspect_err(|_e| { warn!("Failed to load VRF proof: could not decode"; "vrf_proof" => %bytes, "tenure_start_block_id" => %tenure_start_block_id, ); - e })?; Ok(Some(proof)) } else { @@ -3087,25 +3078,23 @@ impl NakamotoChainState { let sn = SortitionDB::get_block_snapshot_consensus(sortdb_conn, 
&block.header.consensus_hash)? .ok_or(ChainstateError::NoSuchBlockError) - .map_err(|e| { + .inspect_err(|_e| { warn!("No block-commit for block"; "consensus_hash" => %block.header.consensus_hash, "stacks_block_hash" => %block.header.block_hash(), "stacks_block_id" => %block.header.block_id() ); - e })?; let block_commit = get_block_commit_by_txid(sortdb_conn, &sn.sortition_id, &sn.winning_block_txid)? .ok_or(ChainstateError::NoSuchBlockError) - .map_err(|e| { + .inspect_err(|_e| { warn!("No block-commit for block"; "consensus_hash" => %block.header.consensus_hash, "stacks_block_hash" => %block.header.block_hash(), "stacks_block_id" => %block.header.block_id() ); - e })?; // N.B. passing block.block_id() here means that we'll look into the parent tenure diff --git a/stackslib/src/chainstate/nakamoto/shadow.rs b/stackslib/src/chainstate/nakamoto/shadow.rs index bd9b28fac7..7636e146ee 100644 --- a/stackslib/src/chainstate/nakamoto/shadow.rs +++ b/stackslib/src/chainstate/nakamoto/shadow.rs @@ -347,14 +347,13 @@ impl NakamotoChainState { let vrf_proof = Self::get_block_vrf_proof(chainstate_conn, tip_block_id, &tenure_consensus_hash)? 
.ok_or_else(|| { - warn!("No VRF proof for {}", &tenure_consensus_hash); + warn!("No VRF proof for {tenure_consensus_hash}"); ChainstateError::NoSuchBlockError }) - .map_err(|e| { + .inspect_err(|_e| { warn!("Could not find shadow tenure VRF proof"; "tip_block_id" => %tip_block_id, "shadow consensus_hash" => %tenure_consensus_hash); - e })?; return Ok(Some(vrf_proof)); diff --git a/stackslib/src/chainstate/nakamoto/tests/mod.rs b/stackslib/src/chainstate/nakamoto/tests/mod.rs index 5e525f2f88..5c588d746a 100644 --- a/stackslib/src/chainstate/nakamoto/tests/mod.rs +++ b/stackslib/src/chainstate/nakamoto/tests/mod.rs @@ -2518,8 +2518,6 @@ fn parse_vote_for_aggregate_public_key_invalid() { }; invalid_function_arg_reward_cycle.set_origin_nonce(1); - let mut account_nonces = std::collections::HashMap::new(); - account_nonces.insert(invalid_contract_name.origin_address(), 1); for (i, tx) in vec![ invalid_contract_address, invalid_contract_name, diff --git a/stackslib/src/chainstate/stacks/block.rs b/stackslib/src/chainstate/stacks/block.rs index ae331df3f6..f9741d060e 100644 --- a/stackslib/src/chainstate/stacks/block.rs +++ b/stackslib/src/chainstate/stacks/block.rs @@ -353,16 +353,13 @@ impl StacksMessageCodec for StacksBlock { // must be only one coinbase let mut coinbase_count = 0; for tx in txs.iter() { - match tx.payload { - TransactionPayload::Coinbase(..) => { - coinbase_count += 1; - if coinbase_count > 1 { - return Err(codec_error::DeserializeError( - "Invalid block: multiple coinbases found".to_string(), - )); - } + if let TransactionPayload::Coinbase(..) = tx.payload { + coinbase_count += 1; + if coinbase_count > 1 { + return Err(codec_error::DeserializeError( + "Invalid block: multiple coinbases found".to_string(), + )); } - _ => {} } } @@ -515,26 +512,23 @@ impl StacksBlock { let mut found_coinbase = false; let mut coinbase_index = 0; for (i, tx) in txs.iter().enumerate() { - match tx.payload { - TransactionPayload::Coinbase(..) 
=> { - if !check_present { - warn!("Found unexpected coinbase tx {}", tx.txid()); - return false; - } - - if found_coinbase { - warn!("Found duplicate coinbase tx {}", tx.txid()); - return false; - } - - if tx.anchor_mode != TransactionAnchorMode::OnChainOnly { - warn!("Invalid coinbase tx {}: not on-chain only", tx.txid()); - return false; - } - found_coinbase = true; - coinbase_index = i; + if let TransactionPayload::Coinbase(..) = tx.payload { + if !check_present { + warn!("Found unexpected coinbase tx {}", tx.txid()); + return false; + } + + if found_coinbase { + warn!("Found duplicate coinbase tx {}", tx.txid()); + return false; + } + + if tx.anchor_mode != TransactionAnchorMode::OnChainOnly { + warn!("Invalid coinbase tx {}: not on-chain only", tx.txid()); + return false; } - _ => {} + found_coinbase = true; + coinbase_index = i; } } @@ -1137,19 +1131,6 @@ mod test { StacksEpochId::latest(), ); - // remove all coinbases - let mut txs_anchored = vec![]; - - for tx in all_txs.iter() { - match tx.payload { - TransactionPayload::Coinbase(..) 
=> { - continue; - } - _ => {} - } - txs_anchored.push(tx); - } - // make microblocks with 3 transactions each (or fewer) for i in 0..(all_txs.len() / 3) { let txs = vec![ diff --git a/stackslib/src/chainstate/stacks/boot/contract_tests.rs b/stackslib/src/chainstate/stacks/boot/contract_tests.rs index 9387c02bff..2d88cfe234 100644 --- a/stackslib/src/chainstate/stacks/boot/contract_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/contract_tests.rs @@ -486,7 +486,7 @@ impl BurnStateDB for TestSimBurnStateDB { height: u32, sortition_id: &SortitionId, ) -> Option<(Vec, u128)> { - if let Some(_) = self.get_burn_header_hash(height, sortition_id) { + if self.get_burn_header_hash(height, sortition_id).is_some() { let first_block = self.get_burn_start_height(); let prepare_len = self.get_pox_prepare_length(); let rc_len = self.get_pox_reward_cycle_length(); diff --git a/stackslib/src/chainstate/stacks/boot/pox_2_tests.rs b/stackslib/src/chainstate/stacks/boot/pox_2_tests.rs index 67485301ad..ff5be1d0e5 100644 --- a/stackslib/src/chainstate/stacks/boot/pox_2_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/pox_2_tests.rs @@ -1371,23 +1371,20 @@ fn test_simple_pox_2_auto_unlock(alice_first: bool) { coinbase_txs.push(r); continue; } - match r.transaction { - TransactionOrigin::Stacks(ref t) => { - let addr = t.auth.origin().address_testnet(); - eprintln!("TX addr: {}", addr); - if addr == alice_address { - alice_txs.insert(t.auth.get_origin_nonce(), r); - } else if addr == bob_address { - bob_txs.insert(t.auth.get_origin_nonce(), r); - } else if addr == charlie_address { - assert!( - r.execution_cost != ExecutionCost::ZERO, - "Execution cost is not zero!" 
- ); - charlie_txs.insert(t.auth.get_origin_nonce(), r); - } + if let TransactionOrigin::Stacks(ref t) = r.transaction { + let addr = t.auth.origin().address_testnet(); + eprintln!("TX addr: {}", addr); + if addr == alice_address { + alice_txs.insert(t.auth.get_origin_nonce(), r); + } else if addr == bob_address { + bob_txs.insert(t.auth.get_origin_nonce(), r); + } else if addr == charlie_address { + assert!( + r.execution_cost != ExecutionCost::ZERO, + "Execution cost is not zero!" + ); + charlie_txs.insert(t.auth.get_origin_nonce(), r); } - _ => {} } } } diff --git a/stackslib/src/chainstate/stacks/boot/pox_3_tests.rs b/stackslib/src/chainstate/stacks/boot/pox_3_tests.rs index 8e81c2965b..136559a195 100644 --- a/stackslib/src/chainstate/stacks/boot/pox_3_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/pox_3_tests.rs @@ -930,16 +930,13 @@ fn pox_auto_unlock(alice_first: bool) { coinbase_txs.push(r); continue; } - match r.transaction { - TransactionOrigin::Stacks(ref t) => { - let addr = t.auth.origin().address_testnet(); - if addr == alice_address { - alice_txs.insert(t.auth.get_origin_nonce(), r); - } else if addr == bob_address { - bob_txs.insert(t.auth.get_origin_nonce(), r); - } + if let TransactionOrigin::Stacks(ref t) = r.transaction { + let addr = t.auth.origin().address_testnet(); + if addr == alice_address { + alice_txs.insert(t.auth.get_origin_nonce(), r); + } else if addr == bob_address { + bob_txs.insert(t.auth.get_origin_nonce(), r); } - _ => {} } } } diff --git a/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs b/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs index 2fd23fb2e6..392c6b2cd1 100644 --- a/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs @@ -928,6 +928,8 @@ fn pox_lock_unlock() { assert_eq!(burnchain.pox_constants.reward_slots(), 6); let mut coinbase_nonce = 0; + // Stores the result of a function with side effects, so have Clippy ignore it + 
#[allow(clippy::collection_is_never_read)] let mut latest_block = None; // Advance into pox4 @@ -2693,6 +2695,8 @@ fn pox_4_delegate_stack_increase_events() { assert_eq!(burnchain.pox_constants.reward_slots(), 6); let mut coinbase_nonce = 0; + // Stores the result of a function with side effects, so have Clippy ignore it + #[allow(clippy::collection_is_never_read)] let mut latest_block = None; let alice_key = keys.pop().unwrap(); @@ -9566,16 +9570,13 @@ fn missed_slots_no_unlock() { coinbase_txs.push(r); continue; } - match r.transaction { - TransactionOrigin::Stacks(ref t) => { - let addr = t.auth.origin().address_testnet(); - if addr == alice_address { - alice_txs.insert(t.auth.get_origin_nonce(), r); - } else if addr == bob_address { - bob_txs.insert(t.auth.get_origin_nonce(), r); - } + if let TransactionOrigin::Stacks(ref t) = r.transaction { + let addr = t.auth.origin().address_testnet(); + if addr == alice_address { + alice_txs.insert(t.auth.get_origin_nonce(), r); + } else if addr == bob_address { + bob_txs.insert(t.auth.get_origin_nonce(), r); } - _ => {} } } } diff --git a/stackslib/src/chainstate/stacks/db/blocks.rs b/stackslib/src/chainstate/stacks/db/blocks.rs index 46682b6f86..8e6c0da9de 100644 --- a/stackslib/src/chainstate/stacks/db/blocks.rs +++ b/stackslib/src/chainstate/stacks/db/blocks.rs @@ -500,20 +500,19 @@ impl StacksChainState { .open(&path_tmp) .map_err(|e| { if e.kind() == io::ErrorKind::NotFound { - error!("File not found: {:?}", &path_tmp); + error!("File not found: {path_tmp:?}"); Error::DBError(db_error::NotFoundError) } else { - error!("Failed to open {:?}: {:?}", &path_tmp, &e); + error!("Failed to open {path_tmp:?}: {e:?}"); Error::DBError(db_error::IOError(e)) } })?; - writer(&mut fd).map_err(|e| { + writer(&mut fd).inspect_err(|_e| { if delete_on_error { // abort let _ = fs::remove_file(&path_tmp); } - e })?; fd.sync_all() @@ -3983,7 +3982,7 @@ impl StacksChainState { } for (consensus_hash, anchored_block_hash) in 
to_delete.into_iter() { - info!("Orphan {}/{}: it does not connect to a previously-accepted block, because its consensus hash does not match an existing snapshot on the valid PoX fork.", &consensus_hash, &anchored_block_hash); + info!("Orphan {consensus_hash}/{anchored_block_hash}: it does not connect to a previously-accepted block, because its consensus hash does not match an existing snapshot on the valid PoX fork."); let _ = StacksChainState::set_block_processed( blocks_tx, None, @@ -3992,12 +3991,8 @@ impl StacksChainState { &anchored_block_hash, false, ) - .map_err(|e| { - warn!( - "Failed to orphan {}/{}: {:?}", - &consensus_hash, &anchored_block_hash, &e - ); - e + .inspect_err(|e| { + warn!("Failed to orphan {consensus_hash}/{anchored_block_hash}: {e:?}") }); } @@ -5142,7 +5137,7 @@ impl StacksChainState { ) { Ok(miner_rewards_opt) => miner_rewards_opt, Err(e) => { - if let Some(_) = miner_id_opt { + if miner_id_opt.is_some() { return Err(e); } else { let msg = format!("Failed to load miner rewards: {:?}", &e); @@ -11201,15 +11196,12 @@ pub mod test { let (_, burn_header_hash, consensus_hash) = peer.next_burnchain_block(burn_ops.clone()); - match (stacks_block_opt, microblocks_opt) { - (Some(stacks_block), Some(microblocks)) => { - peer.process_stacks_epoch_at_tip(&stacks_block, µblocks); - last_block_id = StacksBlockHeader::make_index_block_hash( - &consensus_hash, - &stacks_block.block_hash(), - ); - } - _ => {} + if let (Some(stacks_block), Some(microblocks)) = (stacks_block_opt, microblocks_opt) { + peer.process_stacks_epoch_at_tip(&stacks_block, µblocks); + last_block_id = StacksBlockHeader::make_index_block_hash( + &consensus_hash, + &stacks_block.block_hash(), + ); } let tip = @@ -11884,15 +11876,12 @@ pub mod test { let (_, burn_header_hash, consensus_hash) = peer.next_burnchain_block(burn_ops.clone()); - match (stacks_block_opt, microblocks_opt) { - (Some(stacks_block), Some(microblocks)) => { - peer.process_stacks_epoch_at_tip(&stacks_block, 
µblocks); - last_block_id = StacksBlockHeader::make_index_block_hash( - &consensus_hash, - &stacks_block.block_hash(), - ); - } - _ => {} + if let (Some(stacks_block), Some(microblocks)) = (stacks_block_opt, microblocks_opt) { + peer.process_stacks_epoch_at_tip(&stacks_block, µblocks); + last_block_id = StacksBlockHeader::make_index_block_hash( + &consensus_hash, + &stacks_block.block_hash(), + ); } let tip = diff --git a/stackslib/src/chainstate/stacks/db/mod.rs b/stackslib/src/chainstate/stacks/db/mod.rs index 6e543be3f1..14fece138e 100644 --- a/stackslib/src/chainstate/stacks/db/mod.rs +++ b/stackslib/src/chainstate/stacks/db/mod.rs @@ -1844,10 +1844,7 @@ impl StacksChainState { let nakamoto_staging_blocks_conn = StacksChainState::open_nakamoto_staging_blocks(&nakamoto_staging_blocks_path, true)?; - let init_required = match fs::metadata(&clarity_state_index_marf) { - Ok(_) => false, - Err(_) => true, - }; + let init_required = fs::metadata(&clarity_state_index_marf).is_err(); let state_index = StacksChainState::open_db(mainnet, chain_id, &header_index_root)?; @@ -2754,11 +2751,8 @@ pub mod test { balances: Vec<(StacksAddress, u64)>, ) -> StacksChainState { let path = chainstate_path(test_name); - match fs::metadata(&path) { - Ok(_) => { - fs::remove_dir_all(&path).unwrap(); - } - Err(_) => {} + if fs::metadata(&path).is_ok() { + fs::remove_dir_all(&path).unwrap(); }; let initial_balances = balances @@ -2874,11 +2868,8 @@ pub mod test { }; let path = chainstate_path(function_name!()); - match fs::metadata(&path) { - Ok(_) => { - fs::remove_dir_all(&path).unwrap(); - } - Err(_) => {} + if fs::metadata(&path).is_ok() { + fs::remove_dir_all(&path).unwrap(); }; let mut chainstate = @@ -2964,11 +2955,8 @@ pub mod test { }; let path = chainstate_path(function_name!()); - match fs::metadata(&path) { - Ok(_) => { - fs::remove_dir_all(&path).unwrap(); - } - Err(_) => {} + if fs::metadata(&path).is_ok() { + fs::remove_dir_all(&path).unwrap(); }; let mut chainstate = diff 
--git a/stackslib/src/chainstate/stacks/db/transactions.rs b/stackslib/src/chainstate/stacks/db/transactions.rs index 17e5a3c6e3..98e8779ecc 100644 --- a/stackslib/src/chainstate/stacks/db/transactions.rs +++ b/stackslib/src/chainstate/stacks/db/transactions.rs @@ -72,8 +72,9 @@ impl TryFrom for HashableClarityValue { impl std::hash::Hash for HashableClarityValue { fn hash(&self, state: &mut H) { - #[allow(clippy::unwrap_used)] + #[allow(clippy::unwrap_used, clippy::collection_is_never_read)] // this unwrap is safe _as long as_ TryFrom was used as a constructor + // Also, this function has side effects, which cause Clippy to wrongly think `bytes` is unused let bytes = self.0.serialize_to_vec().unwrap(); bytes.hash(state); } diff --git a/stackslib/src/chainstate/stacks/index/cache.rs b/stackslib/src/chainstate/stacks/index/cache.rs index c5b5323f39..1a1e9673ae 100644 --- a/stackslib/src/chainstate/stacks/index/cache.rs +++ b/stackslib/src/chainstate/stacks/index/cache.rs @@ -258,12 +258,11 @@ impl TrieCache { TrieCache::Everything(ref mut state) => { state.store_node_and_hash(block_id, trieptr, node, hash); } - TrieCache::Node256(ref mut state) => match node { - TrieNodeType::Node256(data) => { + TrieCache::Node256(ref mut state) => { + if let TrieNodeType::Node256(data) = node { state.store_node_and_hash(block_id, trieptr, TrieNodeType::Node256(data), hash); } - _ => {} - }, + } } } @@ -273,12 +272,11 @@ impl TrieCache { match self { TrieCache::Noop(_) => {} TrieCache::Everything(ref mut state) => state.store_node(block_id, trieptr, node), - TrieCache::Node256(ref mut state) => match node { - TrieNodeType::Node256(data) => { + TrieCache::Node256(ref mut state) => { + if let TrieNodeType::Node256(data) = node { state.store_node(block_id, trieptr, TrieNodeType::Node256(data)) } - _ => {} - }, + } } } diff --git a/stackslib/src/chainstate/stacks/index/file.rs b/stackslib/src/chainstate/stacks/index/file.rs index 5a7da69e52..52f571aa1f 100644 --- 
a/stackslib/src/chainstate/stacks/index/file.rs +++ b/stackslib/src/chainstate/stacks/index/file.rs @@ -194,11 +194,8 @@ impl TrieFile { .map(|stat| Some(stat.len())) .unwrap_or(None); - match (size_before_opt, size_after_opt) { - (Some(sz_before), Some(sz_after)) => { - debug!("Shrank DB from {} to {} bytes", sz_before, sz_after); - } - _ => {} + if let (Some(sz_before), Some(sz_after)) = (size_before_opt, size_after_opt) { + debug!("Shrank DB from {} to {} bytes", sz_before, sz_after); } Ok(()) @@ -213,7 +210,7 @@ impl TrieFile { let mut set_sqlite_tmpdir = false; let mut old_tmpdir_opt = None; if let Some(parent_path) = Path::new(db_path).parent() { - if let Err(_) = env::var("SQLITE_TMPDIR") { + if env::var("SQLITE_TMPDIR").is_err() { debug!( "Sqlite will store temporary migration state in '{}'", parent_path.display() @@ -461,11 +458,8 @@ impl TrieFile { self.write_all(buf)?; self.flush()?; - match self { - TrieFile::Disk(ref mut data) => { - data.fd.sync_data()?; - } - _ => {} + if let TrieFile::Disk(ref mut data) = self { + data.fd.sync_data()?; } Ok(offset) } diff --git a/stackslib/src/chainstate/stacks/index/marf.rs b/stackslib/src/chainstate/stacks/index/marf.rs index 368c21c204..c3873d4cf9 100644 --- a/stackslib/src/chainstate/stacks/index/marf.rs +++ b/stackslib/src/chainstate/stacks/index/marf.rs @@ -440,13 +440,12 @@ impl<'a, T: MarfTrieId> MarfTransaction<'a, T> { if new_extension { self.set_block_heights(chain_tip, next_chain_tip, block_height) - .map_err(|e| { + .inspect_err(|_e| { self.open_chain_tip.take(); - e })?; } - debug!("Opened {} to {}", chain_tip, next_chain_tip); + debug!("Opened {chain_tip} to {next_chain_tip}"); Ok(()) } @@ -932,9 +931,8 @@ impl MARF { let mut cursor = TrieCursor::new(path, storage.root_trieptr()); // walk to insertion point - let mut node = Trie::read_root_nohash(storage).map_err(|e| { - test_debug!("Failed to read root of {:?}: {:?}", block_hash, &e); - e + let mut node = 
Trie::read_root_nohash(storage).inspect_err(|_e| { + test_debug!("Failed to read root of {block_hash:?}: {_e:?}"); })?; for _ in 0..(cursor.path.len() + 1) { @@ -956,7 +954,7 @@ impl MARF { )); } - trace!("Cursor reached leaf {:?}", &node); + trace!("Cursor reached leaf {node:?}"); storage.bench_mut().marf_walk_from_finish(); return Ok((cursor, node)); } @@ -1035,24 +1033,16 @@ impl MARF { block_hash: &T, path: &TrieHash, ) -> Result, Error> { - trace!("MARF::get_path({:?}) {:?}", block_hash, path); + trace!("MARF::get_path({block_hash:?}) {path:?}"); // a NotFoundError _here_ means that a block didn't exist - storage.open_block(block_hash).map_err(|e| { - test_debug!("Failed to open block {:?}: {:?}", block_hash, &e); - e + storage.open_block(block_hash).inspect_err(|_e| { + test_debug!("Failed to open block {block_hash:?}: {_e:?}"); })?; // a NotFoundError _here_ means that the key doesn't exist in this view - let (cursor, node) = MARF::walk(storage, block_hash, path).map_err(|e| { - trace!( - "Failed to look up key {:?} {:?}: {:?}", - &block_hash, - path, - &e - ); - e - })?; + let (cursor, node) = MARF::walk(storage, block_hash, path) + .inspect_err(|e| trace!("Failed to look up key {block_hash:?} {path:?}: {e:?}"))?; // both of these get caught by get_by_key and turned into Ok(None) // and a lot of downstream code seems to depend on that behavior, but @@ -1177,13 +1167,9 @@ impl MARF { // restore storage .open_block_maybe_id(&cur_block_hash, cur_block_id) - .map_err(|e| { - warn!( - "Failed to re-open {} {:?}: {:?}", - &cur_block_hash, cur_block_id, &e - ); - warn!("Result of failed path lookup '{}': {:?}", path, &result); - e + .inspect_err(|e| { + warn!("Failed to re-open {cur_block_hash} {cur_block_id:?}: {e:?}"); + warn!("Result of failed path lookup '{path}': {result:?}"); })?; result.map(|option_result| option_result.map(|leaf| leaf.data)) @@ -1208,13 +1194,9 @@ impl MARF { // restore storage .open_block_maybe_id(&cur_block_hash, cur_block_id) - 
.map_err(|e| { - warn!( - "Failed to re-open {} {:?}: {:?}", - &cur_block_hash, cur_block_id, &e - ); - warn!("Result of failed key lookup '{}': {:?}", key, &result); - e + .inspect_err(|e| { + warn!("Failed to re-open {cur_block_hash} {cur_block_id:?}: {e:?}"); + warn!("Result of failed key lookup '{key}': {result:?}"); })?; result.map(|option_result| option_result.map(|leaf| leaf.data)) @@ -1237,13 +1219,9 @@ impl MARF { // restore storage .open_block_maybe_id(&cur_block_hash, cur_block_id) - .map_err(|e| { - warn!( - "Failed to re-open {} {:?}: {:?}", - &cur_block_hash, cur_block_id, &e - ); - warn!("Result of failed hash lookup '{}': {:?}", path, &result); - e + .inspect_err(|e| { + warn!("Failed to re-open {cur_block_hash} {cur_block_id:?}: {e:?}"); + warn!("Result of failed hash lookup '{path}': {result:?}"); })?; result.map(|option_result| option_result.map(|leaf| leaf.data)) @@ -1291,9 +1269,8 @@ impl MARF { // used in testing in order to short-circuit block-height lookups // when the trie struct is tested outside of marf.rs usage if height == 0 { - match storage.test_genesis_block { - Some(ref s) => return Ok(Some(s.clone())), - _ => {} + if let Some(ref s) = storage.test_genesis_block { + return Ok(Some(s.clone())); } } } diff --git a/stackslib/src/chainstate/stacks/index/proofs.rs b/stackslib/src/chainstate/stacks/index/proofs.rs index d66c1b72d3..2e53eca44a 100644 --- a/stackslib/src/chainstate/stacks/index/proofs.rs +++ b/stackslib/src/chainstate/stacks/index/proofs.rs @@ -1210,12 +1210,12 @@ impl TrieMerkleProof { }; // next proof item should be part of a segment proof - match proof[i] { - TrieMerkleProofType::Shunt(_) => { - test_debug!("Malformed proof -- exepcted segment proof following first shunt proof head at {}", i); - return false; - } - _ => {} + if let TrieMerkleProofType::Shunt(_) = proof[i] { + test_debug!( + "Malformed proof -- exepcted segment proof following first shunt proof head at {}", + i + ); + return false; } while i < proof.len() 
{ diff --git a/stackslib/src/chainstate/stacks/index/storage.rs b/stackslib/src/chainstate/stacks/index/storage.rs index 0eb60f25b4..efc19b0afb 100644 --- a/stackslib/src/chainstate/stacks/index/storage.rs +++ b/stackslib/src/chainstate/stacks/index/storage.rs @@ -892,10 +892,8 @@ impl TrieRAM { let root_disk_ptr = BLOCK_HEADER_HASH_ENCODED_SIZE as u64 + 4; let root_ptr = TriePtr::new(TrieNodeID::Node256 as u8, 0, root_disk_ptr as u32); - let (mut root_node, root_hash) = read_nodetype(f, &root_ptr).map_err(|e| { - error!("Failed to read root node info for {:?}: {:?}", bhh, &e); - e - })?; + let (mut root_node, root_hash) = read_nodetype(f, &root_ptr) + .inspect_err(|e| error!("Failed to read root node info for {bhh:?}: {e:?}"))?; let mut next_index = 1; @@ -922,10 +920,8 @@ impl TrieRAM { let next_ptr = frontier .pop_front() .expect("BUG: no ptr in non-empty frontier"); - let (mut next_node, next_hash) = read_nodetype(f, &next_ptr).map_err(|e| { - error!("Failed to read node at {:?}: {:?}", &next_ptr, &e); - e - })?; + let (mut next_node, next_hash) = read_nodetype(f, &next_ptr) + .inspect_err(|e| error!("Failed to read node at {next_ptr:?}: {e:?}"))?; if !next_node.is_leaf() { // queue children in the same order we stored them @@ -1887,9 +1883,8 @@ impl<'a, T: MarfTrieId> TrieStorageTransaction<'a, T> { // blow away db trie_sql::clear_tables(self.sqlite_tx())?; - match self.data.uncommitted_writes { - Some((_, ref mut trie_storage)) => trie_storage.format()?, - None => {} + if let Some((_, ref mut trie_storage)) = self.data.uncommitted_writes { + trie_storage.format()? 
}; self.data.set_block(T::sentinel(), None); diff --git a/stackslib/src/chainstate/stacks/index/test/marf.rs b/stackslib/src/chainstate/stacks/index/test/marf.rs index ec099ef7cd..0df76cec4a 100644 --- a/stackslib/src/chainstate/stacks/index/test/marf.rs +++ b/stackslib/src/chainstate/stacks/index/test/marf.rs @@ -1270,11 +1270,8 @@ fn marf_insert_random_10485760_4096_file_storage() { } let path = "/tmp/rust_marf_insert_random_10485760_4096_file_storage".to_string(); - match fs::metadata(&path) { - Ok(_) => { - fs::remove_dir_all(&path).unwrap(); - } - Err(_) => {} + if fs::metadata(&path).is_ok() { + fs::remove_dir_all(&path).unwrap(); }; let marf_opts = MARFOpenOpts::default(); let f = TrieFileStorage::open(&path, marf_opts).unwrap(); @@ -1549,12 +1546,9 @@ fn marf_read_random_1048576_4096_file_storage() { for marf_opts in MARFOpenOpts::all().into_iter() { test_debug!("With {:?}", &marf_opts); let path = "/tmp/rust_marf_insert_random_1048576_4096_file_storage".to_string(); - match fs::metadata(&path) { - Err(_) => { - eprintln!("Run the marf_insert_random_1048576_4096_file_storage test first"); - return; - } - Ok(_) => {} + if fs::metadata(&path).is_err() { + eprintln!("Run the marf_insert_random_1048576_4096_file_storage test first"); + return; }; let marf_opts = MARFOpenOpts::default(); let mut f_store = TrieFileStorage::new_memory(marf_opts).unwrap(); @@ -2166,7 +2160,7 @@ fn test_marf_begin_from_sentinel_twice() { #[test] fn test_marf_unconfirmed() { let marf_path = "/tmp/test_marf_unconfirmed"; - if let Ok(_) = std::fs::metadata(marf_path) { + if std::fs::metadata(marf_path).is_ok() { std::fs::remove_file(marf_path).unwrap(); } let marf_opts = MARFOpenOpts::default(); diff --git a/stackslib/src/chainstate/stacks/index/test/trie.rs b/stackslib/src/chainstate/stacks/index/test/trie.rs index f4be2fdfd0..bcf5fef64a 100644 --- a/stackslib/src/chainstate/stacks/index/test/trie.rs +++ b/stackslib/src/chainstate/stacks/index/test/trie.rs @@ -1232,8 +1232,6 @@ fn 
trie_cursor_splice_leaf_4() { let (nodes, node_ptrs, hashes) = make_node_path(&mut f, node_id.to_u8(), &path_segments, [31u8; 40].to_vec()); - let mut ptrs = vec![]; - // splice in a node in each path segment for k in 0..5 { let mut path = vec![ @@ -1261,7 +1259,6 @@ fn trie_cursor_splice_leaf_4() { &mut node, ) .unwrap(); - ptrs.push(new_ptr); Trie::update_root_hash(&mut f, &c).unwrap(); @@ -1325,7 +1322,6 @@ fn trie_cursor_splice_leaf_2() { let (nodes, node_ptrs, hashes) = make_node_path(&mut f, node_id.to_u8(), &path_segments, [31u8; 40].to_vec()); - let mut ptrs = vec![]; // splice in a node in each path segment for k in 0..10 { @@ -1350,7 +1346,6 @@ fn trie_cursor_splice_leaf_2() { &mut node, ) .unwrap(); - ptrs.push(new_ptr); Trie::update_root_hash(&mut f, &c).unwrap(); diff --git a/stackslib/src/chainstate/stacks/index/trie.rs b/stackslib/src/chainstate/stacks/index/trie.rs index 0603c74c43..e701858fd1 100644 --- a/stackslib/src/chainstate/stacks/index/trie.rs +++ b/stackslib/src/chainstate/stacks/index/trie.rs @@ -217,22 +217,19 @@ impl Trie { // ptr is a backptr -- find the block let back_block_hash = storage .get_block_from_local_id(ptr.back_block()) - .map_err(|e| { + .inspect_err(|_e| { test_debug!("Failed to get block from local ID {}", ptr.back_block()); - e })? .clone(); storage .open_block_known_id(&back_block_hash, ptr.back_block()) - .map_err(|e| { + .inspect_err(|_e| { test_debug!( - "Failed to open block {} with id {}: {:?}", + "Failed to open block {} with id {}: {_e:?}", &back_block_hash, ptr.back_block(), - &e ); - e })?; let backptr = ptr.from_backptr(); diff --git a/stackslib/src/chainstate/stacks/miner.rs b/stackslib/src/chainstate/stacks/miner.rs index f4c7286f58..9e5fd383b9 100644 --- a/stackslib/src/chainstate/stacks/miner.rs +++ b/stackslib/src/chainstate/stacks/miner.rs @@ -1137,24 +1137,20 @@ impl<'a> StacksMicroblockBuilder<'a> { TransactionResult::Skipped(TransactionSkipped { error, .. 
}) | TransactionResult::ProcessingError(TransactionError { error, .. }) => { test_debug!("Exclude tx {} from microblock", tx.txid()); - match &error { - Error::BlockTooBigError => { - // done mining -- our execution budget is exceeded. - // Make the block from the transactions we did manage to get - test_debug!("Block budget exceeded on tx {}", &tx.txid()); - if block_limit_hit == BlockLimitFunction::NO_LIMIT_HIT { - test_debug!("Switch to mining stx-transfers only"); - block_limit_hit = BlockLimitFunction::CONTRACT_LIMIT_HIT; - } else if block_limit_hit - == BlockLimitFunction::CONTRACT_LIMIT_HIT - { - test_debug!( - "Stop mining microblock block due to limit exceeded" - ); - break; - } + if let Error::BlockTooBigError = &error { + // done mining -- our execution budget is exceeded. + // Make the block from the transactions we did manage to get + test_debug!("Block budget exceeded on tx {}", &tx.txid()); + if block_limit_hit == BlockLimitFunction::NO_LIMIT_HIT { + test_debug!("Switch to mining stx-transfers only"); + block_limit_hit = BlockLimitFunction::CONTRACT_LIMIT_HIT; + } else if block_limit_hit == BlockLimitFunction::CONTRACT_LIMIT_HIT + { + test_debug!( + "Stop mining microblock block due to limit exceeded" + ); + break; } - _ => {} } continue; } @@ -1188,12 +1184,9 @@ impl<'a> StacksMicroblockBuilder<'a> { self.runtime.considered.replace(considered); self.runtime.num_mined = num_txs; - match result { - Err(e) => { - warn!("Error producing microblock: {}", e); - return Err(e); - } - _ => {} + if let Err(e) = result { + warn!("Error producing microblock: {}", e); + return Err(e); } return self.make_next_microblock(txs_included, miner_key, tx_events, None); @@ -2258,7 +2251,13 @@ impl StacksBlockBuilder { // nakamoto miner tenure start heuristic: // mine an empty block so you can start your tenure quickly! 
if let Some(tx) = initial_txs.first() { - if matches!(&tx.payload, TransactionPayload::TenureChange(_)) { + if matches!( + &tx.payload, + TransactionPayload::TenureChange(TenureChangePayload { + cause: TenureChangeCause::BlockFound, + .. + }) + ) { info!("Nakamoto miner heuristic: during tenure change blocks, produce a fast short block to begin tenure"); return Ok((false, tx_events)); } diff --git a/stackslib/src/chainstate/stacks/mod.rs b/stackslib/src/chainstate/stacks/mod.rs index a16fbeddf4..f82da31499 100644 --- a/stackslib/src/chainstate/stacks/mod.rs +++ b/stackslib/src/chainstate/stacks/mod.rs @@ -1568,11 +1568,8 @@ pub mod test { } for tx in all_txs.into_iter() { - match tx.payload { - TransactionPayload::Coinbase(..) => { - continue; - } - _ => {} + if let TransactionPayload::Coinbase(..) = tx.payload { + continue; } txs_anchored.push(tx); if txs_anchored.len() >= num_txs { diff --git a/stackslib/src/chainstate/stacks/tests/block_construction.rs b/stackslib/src/chainstate/stacks/tests/block_construction.rs index 2851941120..ff3b674f44 100644 --- a/stackslib/src/chainstate/stacks/tests/block_construction.rs +++ b/stackslib/src/chainstate/stacks/tests/block_construction.rs @@ -187,7 +187,6 @@ fn test_build_anchored_blocks_stx_transfers_single() { let recipient = StacksAddress::from_string(recipient_addr_str).unwrap(); let mut sender_nonce = 0; - let mut last_block = None; for tenure_id in 0..num_blocks { // send transactions to the mempool let tip = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) @@ -271,8 +270,6 @@ fn test_build_anchored_blocks_stx_transfers_single() { }, ); - last_block = Some(stacks_block.clone()); - peer.next_burnchain_block(burn_ops.clone()); peer.process_stacks_epoch_at_tip(&stacks_block, µblocks); @@ -324,7 +321,6 @@ fn test_build_anchored_blocks_empty_with_builder_timeout() { let recipient = StacksAddress::from_string(recipient_addr_str).unwrap(); let mut sender_nonce = 0; - let mut last_block = None; 
for tenure_id in 0..num_blocks { // send transactions to the mempool let tip = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) @@ -412,8 +408,6 @@ fn test_build_anchored_blocks_empty_with_builder_timeout() { }, ); - last_block = Some(stacks_block.clone()); - peer.next_burnchain_block(burn_ops.clone()); peer.process_stacks_epoch_at_tip(&stacks_block, µblocks); @@ -462,7 +456,6 @@ fn test_build_anchored_blocks_stx_transfers_multi() { let recipient = StacksAddress::from_string(recipient_addr_str).unwrap(); let mut sender_nonce = 0; - let mut last_block = None; for tenure_id in 0..num_blocks { // send transactions to the mempool let tip = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) @@ -574,8 +567,6 @@ fn test_build_anchored_blocks_stx_transfers_multi() { }, ); - last_block = Some(stacks_block.clone()); - peer.next_burnchain_block(burn_ops.clone()); peer.process_stacks_epoch_at_tip(&stacks_block, µblocks); @@ -1369,7 +1360,6 @@ fn test_build_anchored_blocks_skip_too_expensive() { let recipient = StacksAddress::from_string(recipient_addr_str).unwrap(); let mut sender_nonce = 0; - let mut last_block = None; for tenure_id in 0..num_blocks { // send transactions to the mempool let tip = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) @@ -1509,8 +1499,6 @@ fn test_build_anchored_blocks_skip_too_expensive() { }, ); - last_block = Some(stacks_block.clone()); - peer.next_burnchain_block(burn_ops.clone()); peer.process_stacks_epoch_at_tip(&stacks_block, µblocks); @@ -1788,7 +1776,6 @@ fn test_build_anchored_blocks_multiple_chaintips() { sn.block_height }; - let mut last_block = None; for tenure_id in 0..num_blocks { // send transactions to the mempool let tip = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) @@ -1883,8 +1870,6 @@ fn test_build_anchored_blocks_multiple_chaintips() { }, ); - last_block = Some(stacks_block.clone()); - 
peer.next_burnchain_block(burn_ops.clone()); peer.process_stacks_epoch_at_tip(&stacks_block, µblocks); @@ -1930,7 +1915,6 @@ fn test_build_anchored_blocks_empty_chaintips() { sn.block_height }; - let mut last_block = None; for tenure_id in 0..num_blocks { // send transactions to the mempool let tip = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) @@ -2019,8 +2003,6 @@ fn test_build_anchored_blocks_empty_chaintips() { }, ); - last_block = Some(stacks_block.clone()); - peer.next_burnchain_block(burn_ops.clone()); peer.process_stacks_epoch_at_tip(&stacks_block, µblocks); @@ -2073,7 +2055,6 @@ fn test_build_anchored_blocks_too_expensive_transactions() { sn.block_height }; - let mut last_block = None; for tenure_id in 0..num_blocks { // send transactions to the mempool let tip = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) @@ -2197,8 +2178,6 @@ fn test_build_anchored_blocks_too_expensive_transactions() { }, ); - last_block = Some(stacks_block.clone()); - peer.next_burnchain_block(burn_ops.clone()); peer.process_stacks_epoch_at_tip(&stacks_block, µblocks); @@ -2229,7 +2208,6 @@ fn test_build_anchored_blocks_invalid() { let mut last_block: Option = None; let mut last_valid_block: Option = None; - let mut last_tip: Option = None; let mut last_parent: Option = None; let mut last_parent_tip: Option = None; @@ -2261,8 +2239,6 @@ fn test_build_anchored_blocks_invalid() { tip = resume_tip.clone().unwrap(); } - last_tip = Some(tip.clone()); - let (mut burn_ops, stacks_block, microblocks) = peer.make_tenure(|ref mut miner, ref mut sortdb, ref mut chainstate, vrf_proof, ref parent_opt, ref parent_microblock_header_opt| { let parent_opt = if tenure_id != bad_block_tenure { @@ -2438,7 +2414,6 @@ fn test_build_anchored_blocks_bad_nonces() { sn.block_height }; - let mut last_block = None; for tenure_id in 0..num_blocks { eprintln!("Start tenure {:?}", tenure_id); // send transactions to the mempool @@ -2634,8 +2609,6 
@@ fn test_build_anchored_blocks_bad_nonces() { }, ); - last_block = Some(stacks_block.clone()); - peer.next_burnchain_block(burn_ops.clone()); peer.process_stacks_epoch_at_tip(&stacks_block, µblocks); @@ -2693,7 +2666,6 @@ fn test_build_microblock_stream_forks() { let recipient_addr_str = "ST1RFD5Q2QPK3E0F08HG9XDX7SSC7CNRS0QR0SGEV"; let recipient = StacksAddress::from_string(recipient_addr_str).unwrap(); - let mut last_block = None; for tenure_id in 0..num_blocks { // send transactions to the mempool let tip = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) @@ -2904,8 +2876,6 @@ fn test_build_microblock_stream_forks() { }, ); - last_block = Some(stacks_block.clone()); - peer.next_burnchain_block(burn_ops.clone()); peer.process_stacks_epoch_at_tip(&stacks_block, µblocks); } @@ -3487,15 +3457,7 @@ fn test_contract_call_across_clarity_versions() { let num_blocks = 10; let mut anchored_sender_nonce = 0; - - let mut mblock_privks = vec![]; - for _ in 0..num_blocks { - let mblock_privk = StacksPrivateKey::random(); - mblock_privks.push(mblock_privk); - } - let mut peer = TestPeer::new(peer_config); - let chainstate_path = peer.chainstate_path.clone(); let first_stacks_block_height = { @@ -4560,7 +4522,6 @@ fn mempool_incorporate_pox_unlocks() { let recipient_addr_str = "ST1RFD5Q2QPK3E0F08HG9XDX7SSC7CNRS0QR0SGEV"; let recipient = StacksAddress::from_string(recipient_addr_str).unwrap(); - let mut last_block = None; for tenure_id in 0..num_blocks { // send transactions to the mempool let tip = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()) @@ -4713,11 +4674,6 @@ fn mempool_incorporate_pox_unlocks() { let (_, _, consensus_hash) = peer.next_burnchain_block(burn_ops.clone()); peer.process_stacks_epoch_at_tip(&stacks_block, µblocks); - - last_block = Some(StacksBlockHeader::make_index_block_hash( - &consensus_hash, - &stacks_block.block_hash(), - )); } } @@ -4757,7 +4713,6 @@ fn 
test_fee_order_mismatch_nonce_order() { let recipient = StacksAddress::from_string(recipient_addr_str).unwrap(); let sender_nonce = 0; - let mut last_block = None; // send transactions to the mempool let tip = SortitionDB::get_canonical_burn_chain_tip(peer.sortdb.as_ref().unwrap().conn()).unwrap(); @@ -4846,8 +4801,6 @@ fn test_fee_order_mismatch_nonce_order() { }, ); - last_block = Some(stacks_block.clone()); - peer.next_burnchain_block(burn_ops); peer.process_stacks_epoch_at_tip(&stacks_block, µblocks); diff --git a/stackslib/src/chainstate/stacks/tests/chain_histories.rs b/stackslib/src/chainstate/stacks/tests/chain_histories.rs index 22bae4689f..385ab3d4d2 100644 --- a/stackslib/src/chainstate/stacks/tests/chain_histories.rs +++ b/stackslib/src/chainstate/stacks/tests/chain_histories.rs @@ -281,8 +281,6 @@ where ], ); - let mut sortition_winners = vec![]; - let first_snapshot = SortitionDB::get_first_block_snapshot(burn_node.sortdb.conn()).unwrap(); let mut fork = TestBurnchainFork::new( first_snapshot.block_height, @@ -415,8 +413,6 @@ where chain_tip.anchored_header.as_stacks_epoch2().unwrap(), )); - sortition_winners.push(miner_1.origin_address().unwrap()); - let mut next_miner_trace = TestMinerTracePoint::new(); next_miner_trace.add( miner_1.id, @@ -631,7 +627,6 @@ where &fork_snapshot.consensus_hash, &stacks_block_1.header )); - sortition_winners.push(miner_1.origin_address().unwrap()); next_miner_trace.add( miner_1.id, @@ -653,7 +648,6 @@ where &fork_snapshot.consensus_hash, &stacks_block_2.header )); - sortition_winners.push(miner_2.origin_address().unwrap()); next_miner_trace.add( miner_2.id, @@ -735,8 +729,6 @@ where ], ); - let mut sortition_winners = vec![]; - let first_snapshot = SortitionDB::get_first_block_snapshot(burn_node.sortdb.conn()).unwrap(); let mut fork = TestBurnchainFork::new( first_snapshot.block_height, @@ -960,7 +952,6 @@ where &fork_snapshot.consensus_hash, &stacks_block_1.header )); - 
sortition_winners.push(miner_1.origin_address().unwrap()); } else { test_debug!( "\n\nMiner 2 ({}) won sortition\n", @@ -973,7 +964,6 @@ where &fork_snapshot.consensus_hash, &stacks_block_2.header )); - sortition_winners.push(miner_2.origin_address().unwrap()); } // add both blocks to the miner trace, because in this test runner, there will be _two_ @@ -999,8 +989,6 @@ where test_debug!("\n\nMiner 1 and Miner 2 now separate\n\n"); - let mut sortition_winners_1 = sortition_winners.clone(); - let mut sortition_winners_2 = sortition_winners.clone(); let snapshot_at_fork = { let ic = burn_node.sortdb.index_conn(); let tip = fork.get_tip(&ic); @@ -1244,7 +1232,6 @@ where &fork_snapshot.consensus_hash, &stacks_block_1.header )); - sortition_winners_1.push(miner_1.origin_address().unwrap()); } else { test_debug!( "\n\nMiner 2 ({}) won sortition\n", @@ -1257,7 +1244,6 @@ where &fork_snapshot.consensus_hash, &stacks_block_2.header )); - sortition_winners_2.push(miner_2.origin_address().unwrap()); } // each miner produced a block; just one of them got accepted diff --git a/stackslib/src/chainstate/stacks/tests/mod.rs b/stackslib/src/chainstate/stacks/tests/mod.rs index 85b43fb742..259e2bf949 100644 --- a/stackslib/src/chainstate/stacks/tests/mod.rs +++ b/stackslib/src/chainstate/stacks/tests/mod.rs @@ -338,11 +338,8 @@ impl TestStacksNode { panic!("Tried to fork an unforkable chainstate instance"); } - match fs::metadata(&chainstate_path(new_test_name)) { - Ok(_) => { - fs::remove_dir_all(&chainstate_path(new_test_name)).unwrap(); - } - Err(_) => {} + if fs::metadata(&chainstate_path(new_test_name)).is_ok() { + fs::remove_dir_all(&chainstate_path(new_test_name)).unwrap(); } copy_dir( @@ -524,21 +521,14 @@ impl TestStacksNode { fork_tip: &BlockSnapshot, miner: &TestMiner, ) -> Option { - for commit_op in miner.block_commits.iter().rev() { - match SortitionDB::get_block_snapshot_for_winning_stacks_block( + miner.block_commits.iter().rev().find_map(|commit_op| { + 
SortitionDB::get_block_snapshot_for_winning_stacks_block( ic, &fork_tip.sortition_id, &commit_op.block_header_hash, ) .unwrap() - { - Some(sn) => { - return Some(sn); - } - None => {} - } - } - return None; + }) } pub fn get_miner_balance(clarity_tx: &mut ClarityTx, addr: &StacksAddress) -> u128 { @@ -839,7 +829,6 @@ pub fn check_mining_reward( block_height: u64, prev_block_rewards: &[Vec], ) -> bool { - let mut block_rewards = HashMap::new(); let mut stream_rewards = HashMap::new(); let mut heights = HashMap::new(); let mut confirmed = HashSet::new(); @@ -849,9 +838,6 @@ pub fn check_mining_reward( &reward.consensus_hash, &reward.block_hash, ); - if reward.coinbase > 0 { - block_rewards.insert(ibh.clone(), reward.clone()); - } if let MinerPaymentTxFees::Epoch2 { streamed, .. } = &reward.tx_fees { if *streamed > 0 { stream_rewards.insert(ibh.clone(), reward.clone()); @@ -967,22 +953,11 @@ pub fn get_last_microblock_header( miner: &TestMiner, parent_block_opt: Option<&StacksBlock>, ) -> Option { - let last_microblocks_opt = - parent_block_opt.and_then(|block| node.get_microblock_stream(miner, &block.block_hash())); - - let last_microblock_header_opt = match last_microblocks_opt { - Some(last_microblocks) => { - if last_microblocks.is_empty() { - None - } else { - let l = last_microblocks.len() - 1; - Some(last_microblocks[l].header.clone()) - } - } - None => None, - }; - - last_microblock_header_opt + parent_block_opt + .and_then(|block| node.get_microblock_stream(miner, &block.block_hash())) + .as_ref() + .and_then(|mblock_stream| mblock_stream.last()) + .map(|mblock| mblock.header.clone()) } pub fn get_all_mining_rewards( @@ -990,17 +965,14 @@ pub fn get_all_mining_rewards( tip: &StacksHeaderInfo, block_height: u64, ) -> Vec> { - let mut ret = vec![]; let mut tx = chainstate.index_tx_begin(); - for i in 0..block_height { - let block_rewards = + (0..block_height) + .map(|i| { StacksChainState::get_scheduled_block_rewards_in_fork_at_height(&mut tx, tip, i) - 
.unwrap(); - ret.push(block_rewards); - } - - ret + .unwrap() + }) + .collect() } pub fn make_coinbase(miner: &mut TestMiner, burnchain_height: usize) -> StacksTransaction { @@ -1422,11 +1394,8 @@ pub fn instantiate_and_exec( post_flight_callback: Option>, ) -> StacksChainState { let path = chainstate_path(test_name); - match fs::metadata(&path) { - Ok(_) => { - fs::remove_dir_all(&path).unwrap(); - } - Err(_) => {} + if fs::metadata(&path).is_ok() { + fs::remove_dir_all(&path).unwrap(); }; let initial_balances = balances diff --git a/stackslib/src/chainstate/stacks/transaction.rs b/stackslib/src/chainstate/stacks/transaction.rs index 0e5fa22e6b..2ecddd947a 100644 --- a/stackslib/src/chainstate/stacks/transaction.rs +++ b/stackslib/src/chainstate/stacks/transaction.rs @@ -1130,17 +1130,14 @@ impl StacksTransactionSigner { } pub fn sign_sponsor(&mut self, privk: &StacksPrivateKey) -> Result<(), net_error> { - match self.tx.auth { - TransactionAuth::Sponsored(_, ref sponsor_condition) => { - if self.check_oversign - && sponsor_condition.num_signatures() >= sponsor_condition.signatures_required() - { - return Err(net_error::SigningError( - "Sponsor would have too many signatures".to_string(), - )); - } + if let TransactionAuth::Sponsored(_, ref sponsor_condition) = self.tx.auth { + if self.check_oversign + && sponsor_condition.num_signatures() >= sponsor_condition.signatures_required() + { + return Err(net_error::SigningError( + "Sponsor would have too many signatures".to_string(), + )); } - _ => {} } let next_sighash = self.tx.sign_next_sponsor(&self.sighash, privk)?; @@ -1930,24 +1927,21 @@ mod test { // test_debug!("mutate byte {}", &i); let mut cursor = io::Cursor::new(&tx_bytes); let mut reader = LogReader::from_reader(&mut cursor); - match StacksTransaction::consensus_deserialize(&mut reader) { - Ok(corrupt_tx) => { - let mut corrupt_tx_bytes = vec![]; - corrupt_tx - .consensus_serialize(&mut corrupt_tx_bytes) - .unwrap(); - if corrupt_tx_bytes.len() < 
tx_bytes.len() { - // didn't parse fully; the block-parsing logic would reject this block. - tx_bytes[i] = next_byte as u8; - continue; - } - if corrupt_tx.verify().is_ok() && corrupt_tx != *signed_tx { - eprintln!("corrupt tx: {:#?}", &corrupt_tx); - eprintln!("signed tx: {:#?}", &signed_tx); - assert!(false); - } + if let Ok(corrupt_tx) = StacksTransaction::consensus_deserialize(&mut reader) { + let mut corrupt_tx_bytes = vec![]; + corrupt_tx + .consensus_serialize(&mut corrupt_tx_bytes) + .unwrap(); + if corrupt_tx_bytes.len() < tx_bytes.len() { + // didn't parse fully; the block-parsing logic would reject this block. + tx_bytes[i] = next_byte as u8; + continue; + } + if corrupt_tx.verify().is_ok() && corrupt_tx != *signed_tx { + eprintln!("corrupt tx: {:#?}", &corrupt_tx); + eprintln!("signed tx: {:#?}", &signed_tx); + assert!(false); } - Err(_) => {} } // restore tx_bytes[i] = next_byte as u8; @@ -3393,9 +3387,6 @@ mod test { let function_name = ClarityName::try_from("hello-function-name").unwrap(); let function_args = vec![Value::Int(0)]; - let mut contract_name_bytes = vec![contract_name.len() as u8]; - contract_name_bytes.extend_from_slice(contract_name.as_bytes()); - let mut contract_call_bytes = vec![]; address .consensus_serialize(&mut contract_call_bytes) diff --git a/stackslib/src/clarity_cli.rs b/stackslib/src/clarity_cli.rs index 32ef034098..8e11ff613c 100644 --- a/stackslib/src/clarity_cli.rs +++ b/stackslib/src/clarity_cli.rs @@ -642,7 +642,7 @@ impl HeadersDB for CLIHeadersDB { ) -> Option { // mock it let conn = self.conn(); - if let Some(_) = get_cli_block_height(conn, id_bhh) { + if get_cli_block_height(conn, id_bhh).is_some() { let hash_bytes = Sha512Trunc256Sum::from_data(&id_bhh.0); Some(BurnchainHeaderHash(hash_bytes.0)) } else { @@ -657,7 +657,7 @@ impl HeadersDB for CLIHeadersDB { ) -> Option { // mock it let conn = self.conn(); - if let Some(_) = get_cli_block_height(conn, id_bhh) { + if get_cli_block_height(conn, id_bhh).is_some() { let 
hash_bytes = Hash160::from_data(&id_bhh.0); Some(ConsensusHash(hash_bytes.0)) } else { @@ -671,7 +671,7 @@ impl HeadersDB for CLIHeadersDB { _epoch: &StacksEpochId, ) -> Option { let conn = self.conn(); - if let Some(_) = get_cli_block_height(conn, id_bhh) { + if get_cli_block_height(conn, id_bhh).is_some() { // mock it, but make it unique let hash_bytes = Sha512Trunc256Sum::from_data(&id_bhh.0); let hash_bytes_2 = Sha512Trunc256Sum::from_data(&hash_bytes.0); @@ -687,7 +687,7 @@ impl HeadersDB for CLIHeadersDB { _epoch: &StacksEpochId, ) -> Option { let conn = self.conn(); - if let Some(_) = get_cli_block_height(conn, id_bhh) { + if get_cli_block_height(conn, id_bhh).is_some() { // mock it, but make it unique let hash_bytes = Sha512Trunc256Sum::from_data(&id_bhh.0); let hash_bytes_2 = Sha512Trunc256Sum::from_data(&hash_bytes.0); diff --git a/stackslib/src/clarity_vm/database/mod.rs b/stackslib/src/clarity_vm/database/mod.rs index 6f770f5927..e03149dba4 100644 --- a/stackslib/src/clarity_vm/database/mod.rs +++ b/stackslib/src/clarity_vm/database/mod.rs @@ -737,13 +737,15 @@ fn get_first_block_in_tenure( } } None => { - if let Some(_) = get_stacks_header_column_from_table( + if get_stacks_header_column_from_table( conn.conn(), id_bhh, "consensus_hash", &|r| ConsensusHash::from_row(r).expect("FATAL: malformed consensus_hash"), false, - ) { + ) + .is_some() + { return id_bhh.clone().into(); } else { get_stacks_header_column_from_table( diff --git a/stackslib/src/cli.rs b/stackslib/src/cli.rs index 286e7f1854..f43812f2ba 100644 --- a/stackslib/src/cli.rs +++ b/stackslib/src/cli.rs @@ -40,7 +40,7 @@ use crate::chainstate::burn::db::sortdb::{ }; use crate::chainstate::burn::{BlockSnapshot, ConsensusHash}; use crate::chainstate::coordinator::OnChainRewardSetProvider; -use crate::chainstate::nakamoto::miner::{NakamotoBlockBuilder, NakamotoTenureInfo}; +use crate::chainstate::nakamoto::miner::{BlockMetadata, NakamotoBlockBuilder, NakamotoTenureInfo}; use 
crate::chainstate::nakamoto::{NakamotoBlock, NakamotoChainState}; use crate::chainstate::stacks::db::blocks::StagingBlock; use crate::chainstate::stacks::db::{StacksBlockHeaderTypes, StacksChainState, StacksHeaderInfo}; @@ -504,7 +504,21 @@ pub fn command_try_mine(argv: &[String], conf: Option<&Config>) { None, 0, ) - .map(|(block, cost, size, _)| (block.header.block_hash(), block.txs, cost, size)) + .map( + |BlockMetadata { + block, + tenure_consumed, + tenure_size, + .. + }| { + ( + block.header.block_hash(), + block.txs, + tenure_consumed, + tenure_size, + ) + }, + ) } }; diff --git a/stackslib/src/config/mod.rs b/stackslib/src/config/mod.rs index ca800db3c1..a9a03d4861 100644 --- a/stackslib/src/config/mod.rs +++ b/stackslib/src/config/mod.rs @@ -16,7 +16,7 @@ pub mod chain_data; -use std::collections::{HashMap, HashSet}; +use std::collections::{BTreeMap, HashMap, HashSet}; use std::net::{Ipv4Addr, SocketAddr, ToSocketAddrs}; use std::path::PathBuf; use std::str::FromStr; @@ -86,18 +86,41 @@ pub const OP_TX_ANY_ESTIM_SIZE: u64 = fmax!( OP_TX_VOTE_AGG_ESTIM_SIZE ); +/// Default maximum percentage of `satoshis_per_byte` that a Bitcoin fee rate +/// may be increased to when RBFing a transaction const DEFAULT_MAX_RBF_RATE: u64 = 150; // 1.5x +/// Amount to increment the fee by, in Sats/vByte, when RBFing a Bitcoin +/// transaction const DEFAULT_RBF_FEE_RATE_INCREMENT: u64 = 5; +/// Default number of reward cycles of blocks to sync in a non-full inventory +/// sync const INV_REWARD_CYCLES_TESTNET: u64 = 6; +/// Default minimum time to wait between mining blocks in milliseconds. The +/// value must be greater than or equal to 1000 ms because if a block is mined +/// within the same second as its parent, it will be rejected by the signers. const DEFAULT_MIN_TIME_BETWEEN_BLOCKS_MS: u64 = 1_000; +/// Default time in milliseconds to pause after receiving the first threshold +/// rejection, before proposing a new block. 
const DEFAULT_FIRST_REJECTION_PAUSE_MS: u64 = 5_000; +/// Default time in milliseconds to pause after receiving subsequent threshold +/// rejections, before proposing a new block. const DEFAULT_SUBSEQUENT_REJECTION_PAUSE_MS: u64 = 10_000; +/// Default time in milliseconds to wait for a Nakamoto block after seeing a +/// burnchain block before submitting a block commit. const DEFAULT_BLOCK_COMMIT_DELAY_MS: u64 = 20_000; +/// Default percentage of the remaining tenure cost limit to consume each block const DEFAULT_TENURE_COST_LIMIT_PER_BLOCK_PERCENTAGE: u8 = 25; +/// Default number of seconds to wait in-between polling the sortition DB to +/// see if we need to extend the ongoing tenure (e.g. because the current +/// sortition is empty or invalid). const DEFAULT_TENURE_EXTEND_POLL_SECS: u64 = 1; - -// This should be greater than the signers' timeout. This is used for issuing fallback tenure extends -const DEFAULT_TENURE_TIMEOUT_SECS: u64 = 420; +/// Default duration to wait before attempting to issue a tenure extend. +/// This should be greater than the signers' timeout. 
This is used for issuing +/// fallback tenure extends +const DEFAULT_TENURE_TIMEOUT_SECS: u64 = 180; +/// Default percentage of block budget that must be used before attempting a +/// time-based tenure extend +const DEFAULT_TENURE_EXTEND_COST_THRESHOLD: u64 = 50; static HELIUM_DEFAULT_CONNECTION_OPTIONS: LazyLock = LazyLock::new(|| ConnectionOptions { @@ -1191,9 +1214,13 @@ pub struct BurnchainConfig { pub process_exit_at_block_height: Option, pub poll_time_secs: u64, pub satoshis_per_byte: u64, + /// Maximum percentage of `satoshis_per_byte` that a Bitcoin fee rate may + /// be increased to when RBFing a transaction pub max_rbf: u64, pub leader_key_tx_estimated_size: u64, pub block_commit_tx_estimated_size: u64, + /// Amount to increment the fee by, in Sats/vByte, when RBFing a Bitcoin + /// transaction pub rbf_fee_increment: u64, pub first_burn_block_height: Option, pub first_burn_block_timestamp: Option, @@ -1579,9 +1606,8 @@ impl BurnchainConfigFile { .unwrap_or(default_burnchain_config.fault_injection_burnchain_block_delay), max_unspent_utxos: self .max_unspent_utxos - .map(|val| { + .inspect(|&val| { assert!(val <= 1024, "Value for max_unspent_utxos should be <= 1024"); - val }) .or(default_burnchain_config.max_unspent_utxos), }; @@ -2156,6 +2182,10 @@ pub struct MinerConfig { pub tenure_extend_poll_secs: Duration, /// Duration to wait before attempting to issue a tenure extend pub tenure_timeout: Duration, + /// Percentage of block budget that must be used before attempting a time-based tenure extend + pub tenure_extend_cost_threshold: u64, + /// Define the timeout to apply while waiting for signers responses, based on the amount of rejections + pub block_rejection_timeout_steps: HashMap, } impl Default for MinerConfig { @@ -2194,6 +2224,16 @@ impl Default for MinerConfig { ), tenure_extend_poll_secs: Duration::from_secs(DEFAULT_TENURE_EXTEND_POLL_SECS), tenure_timeout: Duration::from_secs(DEFAULT_TENURE_TIMEOUT_SECS), + tenure_extend_cost_threshold: 
DEFAULT_TENURE_EXTEND_COST_THRESHOLD, + + block_rejection_timeout_steps: { + let mut rejections_timeouts_default_map = HashMap::::new(); + rejections_timeouts_default_map.insert(0, Duration::from_secs(600)); + rejections_timeouts_default_map.insert(10, Duration::from_secs(300)); + rejections_timeouts_default_map.insert(20, Duration::from_secs(150)); + rejections_timeouts_default_map.insert(30, Duration::from_secs(0)); + rejections_timeouts_default_map + }, } } } @@ -2590,6 +2630,8 @@ pub struct MinerConfigFile { pub tenure_cost_limit_per_block_percentage: Option, pub tenure_extend_poll_secs: Option, pub tenure_timeout_secs: Option, + pub tenure_extend_cost_threshold: Option, + pub block_rejection_timeout_steps: Option>, } impl MinerConfigFile { @@ -2732,6 +2774,25 @@ impl MinerConfigFile { tenure_cost_limit_per_block_percentage, tenure_extend_poll_secs: self.tenure_extend_poll_secs.map(Duration::from_secs).unwrap_or(miner_default_config.tenure_extend_poll_secs), tenure_timeout: self.tenure_timeout_secs.map(Duration::from_secs).unwrap_or(miner_default_config.tenure_timeout), + tenure_extend_cost_threshold: self.tenure_extend_cost_threshold.unwrap_or(miner_default_config.tenure_extend_cost_threshold), + + block_rejection_timeout_steps: { + if let Some(block_rejection_timeout_items) = self.block_rejection_timeout_steps { + let mut rejection_timeout_durations = HashMap::::new(); + for (slice, seconds) in block_rejection_timeout_items.iter() { + match slice.parse::() { + Ok(slice_slot) => rejection_timeout_durations.insert(slice_slot, Duration::from_secs(*seconds)), + Err(e) => panic!("block_rejection_timeout_steps keys must be unsigned integers: {}", e) + }; + } + if !rejection_timeout_durations.contains_key(&0) { + panic!("block_rejection_timeout_steps requires a definition for the '0' key/step"); + } + rejection_timeout_durations + } else{ + miner_default_config.block_rejection_timeout_steps + } + } }) } } diff --git a/stackslib/src/core/mempool.rs 
b/stackslib/src/core/mempool.rs index e7ff24db78..d21f46c3c1 100644 --- a/stackslib/src/core/mempool.rs +++ b/stackslib/src/core/mempool.rs @@ -1092,11 +1092,8 @@ impl NonceCache { }; // In-memory cache - match self.cache.get_mut(&address) { - Some(nonce) => { - *nonce = value; - } - None => (), + if let Some(nonce) = self.cache.get_mut(&address) { + *nonce = value; } success diff --git a/stackslib/src/core/tests/mod.rs b/stackslib/src/core/tests/mod.rs index d5a655d980..dcb5ec1979 100644 --- a/stackslib/src/core/tests/mod.rs +++ b/stackslib/src/core/tests/mod.rs @@ -2633,7 +2633,6 @@ fn test_filter_txs_by_type() { let mut mempool = MemPoolDB::open_test(false, 0x80000000, &chainstate_path).unwrap(); let addr = StacksAddress::new(1, Hash160([0xff; 20])).unwrap(); - let mut txs = vec![]; let block_height = 10; let mut total_len = 0; @@ -2697,8 +2696,7 @@ fn test_filter_txs_by_type() { ) .unwrap(); - eprintln!("Added {} {}", i, &txid); - txs.push(tx); + eprintln!("Added {i} {txid}"); } mempool_tx.commit().unwrap(); diff --git a/stackslib/src/main.rs b/stackslib/src/main.rs index 8731b78f42..6ad88d0b68 100644 --- a/stackslib/src/main.rs +++ b/stackslib/src/main.rs @@ -1522,7 +1522,7 @@ check if the associated microblocks can be downloaded while next_arrival < stacks_blocks_arrival_order.len() && known_stacks_blocks.contains(&stacks_block_id) { - if let Some(_) = stacks_blocks_available.get(&stacks_block_id) { + if stacks_blocks_available.get(&stacks_block_id).is_some() { // load up the block let stacks_block_opt = StacksChainState::load_block( &old_chainstate.blocks_path, diff --git a/stackslib/src/net/asn.rs b/stackslib/src/net/asn.rs index c28e82484b..fb1f66b481 100644 --- a/stackslib/src/net/asn.rs +++ b/stackslib/src/net/asn.rs @@ -122,9 +122,8 @@ impl ASEntry4 { .ok_or(net_error::DeserializeError( "Line does not match ANS4 regex".to_string(), )) - .map_err(|e| { - debug!("Failed to read line \"{}\"", &buf); - e + .inspect_err(|_e| { + debug!("Failed to read line 
\"{buf}\""); })?; let prefix_octets_str = caps @@ -132,9 +131,8 @@ impl ASEntry4 { .ok_or(net_error::DeserializeError( "Failed to read ANS4 prefix".to_string(), )) - .map_err(|e| { - debug!("Failed to get octets of \"{}\"", &buf); - e + .inspect_err(|_e| { + debug!("Failed to get octets of \"{buf}\""); })? .as_str(); @@ -143,9 +141,8 @@ impl ASEntry4 { .ok_or(net_error::DeserializeError( "Failed to read ASN4 prefix mask".to_string(), )) - .map_err(|e| { - debug!("Failed to get mask of \"{}\"", &buf); - e + .inspect_err(|_e| { + debug!("Failed to get mask of \"{buf}\""); })? .as_str(); @@ -154,9 +151,8 @@ impl ASEntry4 { .ok_or(net_error::DeserializeError( "Failed to read ASN ID".to_string(), )) - .map_err(|e| { - debug!("Failed to get ASN of \"{}\"", &buf); - e + .inspect_err(|_e| { + debug!("Failed to get ASN of \"{buf}\""); })? .as_str(); diff --git a/stackslib/src/net/atlas/mod.rs b/stackslib/src/net/atlas/mod.rs index c382aa618d..49d1036a0b 100644 --- a/stackslib/src/net/atlas/mod.rs +++ b/stackslib/src/net/atlas/mod.rs @@ -195,45 +195,42 @@ impl AttachmentInstance { ) -> Option { if let Value::Tuple(ref attachment) = value { if let Ok(Value::Tuple(ref attachment_data)) = attachment.get("attachment") { - match ( + if let ( + Ok(Value::Sequence(SequenceData::Buffer(content_hash))), + Ok(Value::UInt(attachment_index)), + ) = ( attachment_data.get("hash"), attachment_data.get("attachment-index"), ) { - ( - Ok(Value::Sequence(SequenceData::Buffer(content_hash))), - Ok(Value::UInt(attachment_index)), - ) => { - let content_hash = if content_hash.data.is_empty() { - Hash160::empty() - } else { - match Hash160::from_bytes(&content_hash.data[..]) { - Some(content_hash) => content_hash, - _ => return None, - } - }; - let metadata = match attachment_data.get("metadata") { - Ok(metadata) => { - let mut serialized = vec![]; - metadata - .consensus_serialize(&mut serialized) - .expect("FATAL: invalid metadata"); - to_hex(&serialized[..]) - } - _ => String::new(), - }; - let 
instance = AttachmentInstance { - index_block_hash, - content_hash, - attachment_index: *attachment_index as u32, - stacks_block_height, - metadata, - contract_id: contract_id.clone(), - tx_id, - canonical_stacks_tip_height, - }; - return Some(instance); - } - _ => {} + let content_hash = if content_hash.data.is_empty() { + Hash160::empty() + } else { + match Hash160::from_bytes(&content_hash.data[..]) { + Some(content_hash) => content_hash, + _ => return None, + } + }; + let metadata = match attachment_data.get("metadata") { + Ok(metadata) => { + let mut serialized = vec![]; + metadata + .consensus_serialize(&mut serialized) + .expect("FATAL: invalid metadata"); + to_hex(&serialized[..]) + } + _ => String::new(), + }; + let instance = AttachmentInstance { + index_block_hash, + content_hash, + attachment_index: *attachment_index as u32, + stacks_block_height, + metadata, + contract_id: contract_id.clone(), + tx_id, + canonical_stacks_tip_height, + }; + return Some(instance); } } } diff --git a/stackslib/src/net/atlas/tests.rs b/stackslib/src/net/atlas/tests.rs index 6227c6076c..11d1e4164a 100644 --- a/stackslib/src/net/atlas/tests.rs +++ b/stackslib/src/net/atlas/tests.rs @@ -685,20 +685,15 @@ fn test_downloader_context_attachment_requests() { let peer_url_3 = request_3.get_url().clone(); let request_4 = inventories_requests.pop().unwrap(); let peer_url_4 = request_4.get_url().clone(); - let mut responses = HashMap::new(); let response_1 = new_attachments_inventory_response(vec![(0, vec![1, 1, 1]), (1, vec![0, 0, 0])]); - responses.insert(peer_url_1.clone(), Some(response_1.clone())); let response_2 = new_attachments_inventory_response(vec![(0, vec![1, 1, 1]), (1, vec![0, 0, 0])]); - responses.insert(peer_url_2, Some(response_2.clone())); let response_3 = new_attachments_inventory_response(vec![(0, vec![0, 1, 1]), (1, vec![1, 0, 0])]); - responses.insert(peer_url_3.clone(), Some(response_3.clone())); - responses.insert(peer_url_4, None); inventories_results 
.succeeded diff --git a/stackslib/src/net/chat.rs b/stackslib/src/net/chat.rs index fe45b62f12..2b16a4ac06 100644 --- a/stackslib/src/net/chat.rs +++ b/stackslib/src/net/chat.rs @@ -515,13 +515,10 @@ impl Neighbor { // setting BLOCKSTACK_NEIGHBOR_TEST_${PORTNUMBER} will let us select an organization // for this peer use std::env; - match env::var(format!("BLOCKSTACK_NEIGHBOR_TEST_{}", addr.port)) { - Ok(asn_str) => { - neighbor.asn = asn_str.parse().unwrap(); - neighbor.org = neighbor.asn; - test_debug!("Override {:?} to ASN/org {}", &neighbor.addr, neighbor.asn); - } - Err(_) => {} + if let Ok(asn_str) = env::var(format!("BLOCKSTACK_NEIGHBOR_TEST_{}", addr.port)) { + neighbor.asn = asn_str.parse().unwrap(); + neighbor.org = neighbor.asn; + test_debug!("Override {:?} to ASN/org {}", &neighbor.addr, neighbor.asn); }; } @@ -544,13 +541,10 @@ impl Neighbor { let asn_opt = PeerDB::asn_lookup(conn, &addr.addrbytes).map_err(net_error::DBError)?; - match asn_opt { - Some(a) => { - if a != 0 { - peer.asn = a; - } + if let Some(a) = asn_opt { + if a != 0 { + peer.asn = a; } - None => {} }; } Ok(Some(peer)) @@ -962,10 +956,9 @@ impl ConversationP2P { reply_message, request_preamble.seq, )?; - let reply_handle = self.relay_signed_message(reply).map_err(|e| { - debug!("Unable to reply a {}: {:?}", _msgtype, &e); - e - })?; + let reply_handle = self + .relay_signed_message(reply) + .inspect_err(|e| debug!("Unable to reply a {_msgtype}: {e:?}"))?; Ok(reply_handle) } @@ -981,10 +974,9 @@ impl ConversationP2P { let _msgtype = forward_message.get_message_name().to_owned(); let fwd = self.sign_relay_message(local_peer, burnchain_view, relay_hints, forward_message)?; - let fwd_handle = self.relay_signed_message(fwd).map_err(|e| { - debug!("Unable to forward a {}: {:?}", _msgtype, &e); - e - })?; + let fwd_handle = self + .relay_signed_message(fwd) + .inspect_err(|e| debug!("Unable to forward a {_msgtype}: {e:?}"))?; Ok(fwd_handle) } @@ -1475,13 +1467,9 @@ impl ConversationP2P { 
neighbors: neighbor_addrs, }); let reply = self.sign_reply(chain_view, &local_peer.private_key, payload, preamble.seq)?; - let reply_handle = self.relay_signed_message(reply).map_err(|e| { - debug!( - "Outbox to {:?} is full; cannot reply to GetNeighbors", - &self - ); - e - })?; + let reply_handle = self + .relay_signed_message(reply) + .inspect_err(|_e| debug!("Outbox to {self:?} is full; cannot reply to GetNeighbors"))?; Ok(reply_handle) } @@ -1747,12 +1735,8 @@ impl ConversationP2P { &network.stacks_tip.block_hash, reward_cycle, )?; - let nakamoto_inv = NakamotoInvData::try_from(&bitvec_bools).map_err(|e| { - warn!( - "Failed to create a NakamotoInv response to {:?}: {:?}", - get_nakamoto_inv, &e - ); - e + let nakamoto_inv = NakamotoInvData::try_from(&bitvec_bools).inspect_err(|e| { + warn!("Failed to create a NakamotoInv response to {get_nakamoto_inv:?}: {e:?}") })?; debug!( @@ -3110,11 +3094,8 @@ mod test { services: u16, ) -> (PeerDB, SortitionDB, StackerDBs, PoxId, StacksChainState) { let test_path = format!("/tmp/stacks-test-databases-{}", testname); - match fs::metadata(&test_path) { - Ok(_) => { - fs::remove_dir_all(&test_path).unwrap(); - } - Err(_) => {} + if fs::metadata(&test_path).is_ok() { + fs::remove_dir_all(&test_path).unwrap(); }; fs::create_dir_all(&test_path).unwrap(); diff --git a/stackslib/src/net/connection.rs b/stackslib/src/net/connection.rs index 09465721ba..6aa8aa8c08 100644 --- a/stackslib/src/net/connection.rs +++ b/stackslib/src/net/connection.rs @@ -379,6 +379,7 @@ pub struct ConnectionOptions { /// Units are milliseconds. A value of 0 means "never". pub log_neighbors_freq: u64, pub inv_sync_interval: u64, + // how many reward cycles of blocks to sync in a non-full inventory sync pub inv_reward_cycles: u64, pub download_interval: u64, pub pingback_timeout: u64, @@ -926,19 +927,16 @@ impl ConnectionInbox

{ let bytes_consumed = if let Some(ref mut preamble) = preamble_opt { let (message_opt, bytes_consumed) = self.consume_payload(protocol, preamble, &buf[offset..])?; - match message_opt { - Some(message) => { - // queue up - test_debug!( - "Consumed message '{}' (request {}) in {} bytes", - message.get_message_name(), - message.request_id(), - bytes_consumed - ); - self.inbox.push_back(message); - consumed_message = true; - } - None => {} + if let Some(message) = message_opt { + // queue up + test_debug!( + "Consumed message '{}' (request {}) in {} bytes", + message.get_message_name(), + message.request_id(), + bytes_consumed + ); + self.inbox.push_back(message); + consumed_message = true; }; bytes_consumed @@ -982,14 +980,11 @@ impl ConnectionInbox

{ if let Some(ref mut preamble) = preamble_opt { let (message_opt, _bytes_consumed) = self.consume_payload(protocol, preamble, &[])?; - match message_opt { - Some(message) => { - // queue up - test_debug!("Consumed buffered message '{}' (request {}) from {} input buffer bytes", message.get_message_name(), message.request_id(), _bytes_consumed); - self.inbox.push_back(message); - consumed_message = true; - } - None => {} + if let Some(message) = message_opt { + // queue up + test_debug!("Consumed buffered message '{}' (request {}) from {} input buffer bytes", message.get_message_name(), message.request_id(), _bytes_consumed); + self.inbox.push_back(message); + consumed_message = true; } } self.preamble = preamble_opt; diff --git a/stackslib/src/net/db.rs b/stackslib/src/net/db.rs index 3c31ce41d4..4b5bee8975 100644 --- a/stackslib/src/net/db.rs +++ b/stackslib/src/net/db.rs @@ -158,7 +158,7 @@ impl LocalPeer { "Will be authenticating p2p messages with the following"; "public key" => &Secp256k1PublicKey::from_private(&pkey).to_hex(), "services" => &to_hex(&services.to_be_bytes()), - "Stacker DBs" => stacker_dbs.iter().map(|cid| format!("{}", &cid)).collect::>().join(",") + "Stacker DBs" => stacker_dbs.iter().map(|cid| cid.to_string()).collect::>().join(",") ); LocalPeer { diff --git a/stackslib/src/net/dns.rs b/stackslib/src/net/dns.rs index d94f45d4d6..1a9e8278bd 100644 --- a/stackslib/src/net/dns.rs +++ b/stackslib/src/net/dns.rs @@ -377,13 +377,10 @@ mod test { let mut resolved_addrs = None; loop { client.try_recv().unwrap(); - match client.poll_lookup("www.google.com", 80).unwrap() { - Some(addrs) => { - test_debug!("addrs: {:?}", &addrs); - resolved_addrs = Some(addrs); - break; - } - None => {} + if let Some(addrs) = client.poll_lookup("www.google.com", 80).unwrap() { + test_debug!("addrs: {:?}", &addrs); + resolved_addrs = Some(addrs); + break; } sleep_ms(100); } @@ -423,13 +420,10 @@ mod test { if resolved_addrs.contains_key(*name) { continue; } - match 
client.poll_lookup(name, 80).unwrap() { - Some(addrs) => { - test_debug!("name {name} addrs: {addrs:?}"); - resolved_addrs.insert(name.to_string(), addrs); - break; - } - None => {} + if let Some(addrs) = client.poll_lookup(name, 80).unwrap() { + test_debug!("name {name} addrs: {addrs:?}"); + resolved_addrs.insert(name.to_string(), addrs); + break; } } @@ -452,13 +446,10 @@ mod test { let mut resolved_error = None; loop { client.try_recv().unwrap(); - match client.poll_lookup("asdfjkl;", 80).unwrap() { - Some(resp) => { - test_debug!("addrs: {:?}", &resp); - resolved_error = Some(resp); - break; - } - None => {} + if let Some(resp) = client.poll_lookup("asdfjkl;", 80).unwrap() { + test_debug!("addrs: {:?}", &resp); + resolved_error = Some(resp); + break; } sleep_ms(100); } diff --git a/stackslib/src/net/download/epoch2x.rs b/stackslib/src/net/download/epoch2x.rs index ea9076ccd7..f832457259 100644 --- a/stackslib/src/net/download/epoch2x.rs +++ b/stackslib/src/net/download/epoch2x.rs @@ -1045,9 +1045,8 @@ impl PeerNetwork { /// Pass a hint to the downloader to re-scan pub fn hint_download_rescan(&mut self, target_height: u64, ibd: bool) { - match self.block_downloader { - Some(ref mut dl) => dl.hint_download_rescan(target_height, ibd), - None => {} + if let Some(ref mut dl) = self.block_downloader { + dl.hint_download_rescan(target_height, ibd) } } @@ -1978,11 +1977,10 @@ impl PeerNetwork { for sortition_height in priority.into_iter() { match downloader.blocks_to_try.get_mut(&sortition_height) { Some(ref mut keys) => { - match PeerNetwork::begin_request(network, &downloader.dns_lookups, keys) { - Some((key, handle)) => { - requests.insert(key.clone(), handle); - } - None => {} + if let Some((key, handle)) = + PeerNetwork::begin_request(network, &downloader.dns_lookups, keys) + { + requests.insert(key.clone(), handle); } } None => { @@ -2016,11 +2014,10 @@ impl PeerNetwork { for sortition_height in priority.into_iter() { match 
downloader.microblocks_to_try.get_mut(&sortition_height) { Some(ref mut keys) => { - match PeerNetwork::begin_request(network, &downloader.dns_lookups, keys) { - Some((key, handle)) => { - requests.insert(key.clone(), handle); - } - None => {} + if let Some((key, handle)) = + PeerNetwork::begin_request(network, &downloader.dns_lookups, keys) + { + requests.insert(key.clone(), handle); } } None => { @@ -2480,9 +2477,8 @@ impl PeerNetwork { if done { // reset state if we're done - match self.block_downloader { - Some(ref mut downloader) => downloader.reset(), - None => {} + if let Some(ref mut downloader) = self.block_downloader { + downloader.reset() } } diff --git a/stackslib/src/net/download/nakamoto/download_state_machine.rs b/stackslib/src/net/download/nakamoto/download_state_machine.rs index b89649799d..3f60752d1d 100644 --- a/stackslib/src/net/download/nakamoto/download_state_machine.rs +++ b/stackslib/src/net/download/nakamoto/download_state_machine.rs @@ -1186,12 +1186,11 @@ impl NakamotoDownloadStateMachine { let _ = downloader .try_advance_from_chainstate(chainstate) - .map_err(|e| { + .inspect_err(|e| { warn!( - "Failed to advance downloader in state {} for {}: {:?}", - &downloader.state, &downloader.naddr, &e - ); - e + "Failed to advance downloader in state {} for {}: {e:?}", + &downloader.state, &downloader.naddr + ) }); debug!( @@ -1257,13 +1256,11 @@ impl NakamotoDownloadStateMachine { { if let Some(highest_complete_tenure_downloader) = downloader .make_highest_complete_tenure_downloader() - .map_err(|e| { + .inspect_err(|e| { warn!( - "Failed to make highest complete tenure downloader for {:?}: {:?}", - &downloader.unconfirmed_tenure_id(), - &e - ); - e + "Failed to make highest complete tenure downloader for {:?}: {e:?}", + &downloader.unconfirmed_tenure_id() + ) }) .ok() { diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader.rs b/stackslib/src/net/download/nakamoto/tenure_downloader.rs index 1d4d680c43..6e98703956 100644 --- 
a/stackslib/src/net/download/nakamoto/tenure_downloader.rs +++ b/stackslib/src/net/download/nakamoto/tenure_downloader.rs @@ -781,9 +781,8 @@ impl NakamotoTenureDownloader { &block_id, get_epoch_time_ms().saturating_sub(start_request_time) ); - let block = response.decode_nakamoto_block().map_err(|e| { - warn!("Failed to decode response for a Nakamoto block: {:?}", &e); - e + let block = response.decode_nakamoto_block().inspect_err(|e| { + warn!("Failed to decode response for a Nakamoto block: {e:?}") })?; self.try_accept_tenure_start_block(block)?; Ok(None) @@ -794,9 +793,8 @@ impl NakamotoTenureDownloader { &block_id, get_epoch_time_ms().saturating_sub(start_request_time) ); - let block = response.decode_nakamoto_block().map_err(|e| { - warn!("Failed to decode response for a Nakamoto block: {:?}", &e); - e + let block = response.decode_nakamoto_block().inspect_err(|e| { + warn!("Failed to decode response for a Nakamoto block: {e:?}") })?; self.try_accept_tenure_end_block(&block)?; Ok(None) @@ -807,9 +805,8 @@ impl NakamotoTenureDownloader { &end_block_id, get_epoch_time_ms().saturating_sub(start_request_time) ); - let blocks = response.decode_nakamoto_tenure().map_err(|e| { - warn!("Failed to decode response for a Nakamoto tenure: {:?}", &e); - e + let blocks = response.decode_nakamoto_tenure().inspect_err(|e| { + warn!("Failed to decode response for a Nakamoto tenure: {e:?}") })?; let blocks_opt = self.try_accept_tenure_blocks(blocks)?; Ok(blocks_opt) diff --git a/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs b/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs index f8054828b6..d73342164e 100644 --- a/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs +++ b/stackslib/src/net/download/nakamoto/tenure_downloader_set.rs @@ -571,12 +571,11 @@ impl NakamotoTenureDownloaderSet { let _ = downloader .try_advance_from_chainstate(chainstate) - .map_err(|e| { + .inspect_err(|e| { warn!( - "Failed to advance downloader in state {} for {}: 
{:?}", - &downloader.state, &downloader.naddr, &e + "Failed to advance downloader in state {} for {}: {e:?}", + &downloader.state, &downloader.naddr ); - e }); debug!( diff --git a/stackslib/src/net/http/request.rs b/stackslib/src/net/http/request.rs index dd5ad7e046..8ccb214146 100644 --- a/stackslib/src/net/http/request.rs +++ b/stackslib/src/net/http/request.rs @@ -273,29 +273,23 @@ impl StacksMessageCodec for HttpRequestPreamble { .map_err(CodecError::WriteError)?; // content-type - match self.content_type { - Some(ref c) => { - fd.write_all("Content-Type: ".as_bytes()) - .map_err(CodecError::WriteError)?; - fd.write_all(c.to_string().as_str().as_bytes()) - .map_err(CodecError::WriteError)?; - fd.write_all("\r\n".as_bytes()) - .map_err(CodecError::WriteError)?; - } - None => {} + if let Some(ref c) = self.content_type { + fd.write_all("Content-Type: ".as_bytes()) + .map_err(CodecError::WriteError)?; + fd.write_all(c.to_string().as_str().as_bytes()) + .map_err(CodecError::WriteError)?; + fd.write_all("\r\n".as_bytes()) + .map_err(CodecError::WriteError)?; } // content-length - match self.content_length { - Some(l) => { - fd.write_all("Content-Length: ".as_bytes()) - .map_err(CodecError::WriteError)?; - fd.write_all(format!("{}", l).as_bytes()) - .map_err(CodecError::WriteError)?; - fd.write_all("\r\n".as_bytes()) - .map_err(CodecError::WriteError)?; - } - None => {} + if let Some(l) = self.content_length { + fd.write_all("Content-Length: ".as_bytes()) + .map_err(CodecError::WriteError)?; + fd.write_all(format!("{}", l).as_bytes()) + .map_err(CodecError::WriteError)?; + fd.write_all("\r\n".as_bytes()) + .map_err(CodecError::WriteError)?; } // keep-alive diff --git a/stackslib/src/net/httpcore.rs b/stackslib/src/net/httpcore.rs index 00fa0948bd..a7e96a1912 100644 --- a/stackslib/src/net/httpcore.rs +++ b/stackslib/src/net/httpcore.rs @@ -1232,25 +1232,22 @@ impl StacksHttp { /// This method will set up this state machine to consume the message associated with this 
/// premable, if the response is chunked. fn set_preamble(&mut self, preamble: &StacksHttpPreamble) -> Result<(), NetError> { - match preamble { - StacksHttpPreamble::Response(ref http_response_preamble) => { - // we can only receive a response if we're expecting it - if self.request_handler_index.is_none() && !self.allow_arbitrary_response { - return Err(NetError::DeserializeError( - "Unexpected HTTP response: no active request handler".to_string(), - )); + if let StacksHttpPreamble::Response(ref http_response_preamble) = preamble { + // we can only receive a response if we're expecting it + if self.request_handler_index.is_none() && !self.allow_arbitrary_response { + return Err(NetError::DeserializeError( + "Unexpected HTTP response: no active request handler".to_string(), + )); + } + if http_response_preamble.is_chunked() { + // we can only receive one response at a time + if self.reply.is_some() { + test_debug!("Have pending reply already"); + return Err(NetError::InProgress); } - if http_response_preamble.is_chunked() { - // we can only receive one response at a time - if self.reply.is_some() { - test_debug!("Have pending reply already"); - return Err(NetError::InProgress); - } - self.set_pending(http_response_preamble); - } + self.set_pending(http_response_preamble); } - _ => {} } Ok(()) } @@ -1275,9 +1272,8 @@ impl StacksHttp { return Err(NetError::InvalidState); } if let Some(reply) = self.reply.as_mut() { - match reply.stream.consume_data(fd).map_err(|e| { + match reply.stream.consume_data(fd).inspect_err(|_e| { self.reset(); - e })? { (Some((byte_vec, bytes_total)), sz) => { // done receiving @@ -1491,11 +1487,11 @@ impl ProtocolFamily for StacksHttp { } // message of unknown length. Buffer up and maybe we can parse it. 
- let (message_bytes_opt, num_read) = - self.consume_data(http_response_preamble, fd).map_err(|e| { - self.reset(); - e - })?; + let (message_bytes_opt, num_read) = self + .consume_data(http_response_preamble, fd) + .inspect_err(|_e| { + self.reset(); + })?; match message_bytes_opt { Some((message_bytes, total_bytes_consumed)) => { diff --git a/stackslib/src/net/inv/epoch2x.rs b/stackslib/src/net/inv/epoch2x.rs index 58bf5e495e..430189c41e 100644 --- a/stackslib/src/net/inv/epoch2x.rs +++ b/stackslib/src/net/inv/epoch2x.rs @@ -1534,15 +1534,12 @@ impl PeerNetwork { } // does the peer agree with our PoX view up to this reward cycle? - match stats.inv.pox_inv_cmp(&self.pox_id) { - Some((disagreed, _, _)) => { - if disagreed < target_block_reward_cycle { - // can't proceed - debug!("{:?}: remote neighbor {:?} disagrees with our PoX inventory at reward cycle {} (asked for {})", &self.local_peer, nk, disagreed, target_block_reward_cycle); - return Ok(0); - } + if let Some((disagreed, _, _)) = stats.inv.pox_inv_cmp(&self.pox_id) { + if disagreed < target_block_reward_cycle { + // can't proceed + debug!("{:?}: remote neighbor {:?} disagrees with our PoX inventory at reward cycle {} (asked for {})", &self.local_peer, nk, disagreed, target_block_reward_cycle); + return Ok(0); } - None => {} } let target_block_height = self @@ -1847,10 +1844,7 @@ impl PeerNetwork { let message = self.sign_for_neighbor(nk, payload)?; let request = self .send_neighbor_message(nk, message, request_timeout) - .map_err(|e| { - debug!("Failed to send GetPoxInv to {:?}: {:?}", &nk, &e); - e - })?; + .inspect_err(|e| debug!("Failed to send GetPoxInv to {nk:?}: {e:?}"))?; stats.getpoxinv_begin(request, target_pox_reward_cycle); if let Some(event_id) = event_id_opt { @@ -2040,10 +2034,7 @@ impl PeerNetwork { let message = self.sign_for_neighbor(nk, payload)?; let request = self .send_neighbor_message(nk, message, request_timeout) - .map_err(|e| { - debug!("Failed to send GetPoxInv to {:?}: {:?}", &nk, 
&e); - e - })?; + .inspect_err(|e| debug!("Failed to send GetPoxInv to {nk:?}: {e:?}"))?; stats.getblocksinv_begin(request, target_block_reward_cycle, num_blocks_expected); if let Some(event_id) = event_id_opt { @@ -2523,13 +2514,10 @@ impl PeerNetwork { let mut cur_neighbors = HashSet::new(); for (nk, event_id) in self.events.iter() { // only outbound authenticated peers - match self.peers.get(event_id) { - Some(convo) => { - if convo.is_outbound() && convo.is_authenticated() { - cur_neighbors.insert(nk.clone()); - } + if let Some(convo) = self.peers.get(event_id) { + if convo.is_outbound() && convo.is_authenticated() { + cur_neighbors.insert(nk.clone()); } - None => {} } } @@ -2543,17 +2531,14 @@ impl PeerNetwork { /// Set a hint that we learned something new, and need to sync invs again pub fn hint_sync_invs(&mut self, target_height: u64) { - match self.inv_state { - Some(ref mut inv_state) => { - debug!( - "Awaken inv sync to re-scan peer block inventories at height {}", - target_height - ); - inv_state.hint_learned_data = true; - inv_state.hint_do_rescan = true; - inv_state.hint_learned_data_height = target_height; - } - None => {} + if let Some(ref mut inv_state) = self.inv_state { + debug!( + "Awaken inv sync to re-scan peer block inventories at height {}", + target_height + ); + inv_state.hint_learned_data = true; + inv_state.hint_do_rescan = true; + inv_state.hint_learned_data_height = target_height; } } @@ -2605,18 +2590,13 @@ impl PeerNetwork { // if this succeeds, then we should be able to make a BlocksInv let ancestor_sn = self .get_ancestor_sortition_snapshot(sortdb, target_block_height) - .map_err(|e| { - debug!( - "Failed to load ancestor sortition snapshot at height {}: {:?}", - target_block_height, &e - ); - e + .inspect_err(|e| { + debug!( "Failed to load ancestor sortition snapshot at height {target_block_height}: {e:?}") })?; - let tip_sn = self.get_tip_sortition_snapshot(sortdb).map_err(|e| { - debug!("Failed to load tip sortition snapshot: 
{:?}", &e); - e - })?; + let tip_sn = self + .get_tip_sortition_snapshot(sortdb) + .inspect_err(|e| debug!("Failed to load tip sortition snapshot: {e:?}"))?; let getblocksinv = GetBlocksInv { consensus_hash: ancestor_sn.consensus_hash, @@ -2634,12 +2614,11 @@ impl PeerNetwork { let blocks_inv = ConversationP2P::make_getblocksinv_response(self, sortdb, chainstate, &getblocksinv) - .map_err(|e| { + .inspect_err(|e| { debug!( - "Failed to load blocks inventory at reward cycle {} ({:?}): {:?}", - reward_cycle, &ancestor_sn.consensus_hash, &e - ); - e + "Failed to load blocks inventory at reward cycle {reward_cycle} ({:?}): {e:?}", + &ancestor_sn.consensus_hash + ); })?; match blocks_inv { diff --git a/stackslib/src/net/inv/nakamoto.rs b/stackslib/src/net/inv/nakamoto.rs index 74cc8f0d0e..9bebbaf642 100644 --- a/stackslib/src/net/inv/nakamoto.rs +++ b/stackslib/src/net/inv/nakamoto.rs @@ -982,24 +982,22 @@ impl NakamotoInvStateMachine { ); let Some(inv) = self.inventories.get_mut(&naddr) else { debug!( - "{:?}: Got a reply for an untracked inventory peer {}: {:?}", + "{:?}: Got a reply for an untracked inventory peer {naddr}: {reply:?}", network.get_local_peer(), - &naddr, - &reply ); continue; }; - let Ok(inv_learned) = inv.getnakamotoinv_try_finish(network, reply).map_err(|e| { - warn!( - "{:?}: Failed to finish inventory sync to {}: {:?}", - network.get_local_peer(), - &naddr, - &e - ); - self.comms.add_broken(network, &naddr); - e - }) else { + let Ok(inv_learned) = inv + .getnakamotoinv_try_finish(network, reply) + .inspect_err(|e| { + warn!( + "{:?}: Failed to finish inventory sync to {naddr}: {e:?}", + network.get_local_peer() + ); + self.comms.add_broken(network, &naddr); + }) + else { continue; }; @@ -1051,14 +1049,15 @@ impl NakamotoInvStateMachine { &e ); } - let Ok((_, learned)) = self.process_getnakamotoinv_finishes(network).map_err(|e| { - warn!( - "{:?}: Failed to finish Nakamoto tenure inventory sync: {:?}", - network.get_local_peer(), - &e - ); - e - }) 
else { + let Ok((_, learned)) = self + .process_getnakamotoinv_finishes(network) + .inspect_err(|e| { + warn!( + "{:?}: Failed to finish Nakamoto tenure inventory sync: {e:?}", + network.get_local_peer(), + ) + }) + else { self.last_sort_tip = Some(network.burnchain_tip.clone()); return false; }; diff --git a/stackslib/src/net/mod.rs b/stackslib/src/net/mod.rs index bdb25d0ce2..2a8cf7f523 100644 --- a/stackslib/src/net/mod.rs +++ b/stackslib/src/net/mod.rs @@ -2376,11 +2376,8 @@ pub mod test { if self.closed { return Ok(0); } - match self.read_error { - Some(ref e) => { - return Err(io::Error::from((*e).clone())); - } - None => {} + if let Some(ref e) = self.read_error { + return Err(io::Error::from((*e).clone())); } let sz = self.c.read(buf)?; @@ -2403,11 +2400,8 @@ pub mod test { if self.closed { return Err(io::Error::from(ErrorKind::Other)); // EBADF } - match self.write_error { - Some(ref e) => { - return Err(io::Error::from((*e).clone())); - } - None => {} + if let Some(ref e) = self.write_error { + return Err(io::Error::from((*e).clone())); } self.c.write(buf) } @@ -2799,11 +2793,8 @@ pub mod test { pub fn make_test_path(config: &TestPeerConfig) -> String { let test_path = TestPeer::test_path(config); - match fs::metadata(&test_path) { - Ok(_) => { - fs::remove_dir_all(&test_path).unwrap(); - } - Err(_) => {} + if fs::metadata(&test_path).is_ok() { + fs::remove_dir_all(&test_path).unwrap(); }; fs::create_dir_all(&test_path).unwrap(); @@ -3610,11 +3601,8 @@ pub mod test { ch: &ConsensusHash, ) { for op in blockstack_ops.iter_mut() { - match op { - BlockstackOperationType::LeaderKeyRegister(ref mut data) => { - data.consensus_hash = (*ch).clone(); - } - _ => {} + if let BlockstackOperationType::LeaderKeyRegister(ref mut data) = op { + data.consensus_hash = (*ch).clone(); } } } diff --git a/stackslib/src/net/neighbors/comms.rs b/stackslib/src/net/neighbors/comms.rs index 821952af33..48759c913d 100644 --- a/stackslib/src/net/neighbors/comms.rs +++ 
b/stackslib/src/net/neighbors/comms.rs @@ -106,14 +106,12 @@ pub trait NeighborComms { let msg = network .sign_for_neighbor(&nk, StacksMessageType::Handshake(handshake_data)) - .map_err(|e| { + .inspect_err(|_e| { info!( - "{:?}: Failed to sign for peer {:?}", + "{:?}: Failed to sign for peer {nk:?}", network.get_local_peer(), - &nk ); self.add_dead(network, &nk); - e })?; network diff --git a/stackslib/src/net/neighbors/db.rs b/stackslib/src/net/neighbors/db.rs index a9d2268fca..3b1d99e906 100644 --- a/stackslib/src/net/neighbors/db.rs +++ b/stackslib/src/net/neighbors/db.rs @@ -223,26 +223,22 @@ pub trait NeighborWalkDB { // favor neighbors with older last-contact times let next_neighbors_res = self .get_fresh_random_neighbors(network, (NUM_NEIGHBORS as u64) * 2) - .map_err(|e| { + .inspect_err(|e| { debug!( - "{:?}: Failed to load fresh initial walk neighbors: {:?}", + "{:?}: Failed to load fresh initial walk neighbors: {e:?}", network.get_local_peer(), - &e ); - e }); let db_neighbors = if let Ok(neighbors) = next_neighbors_res { neighbors } else { let any_neighbors = Self::pick_walk_neighbors(network, (NUM_NEIGHBORS as u64) * 2, 0) - .map_err(|e| { + .inspect_err(|e| { info!( - "{:?}: Failed to load any initial walk neighbors: {:?}", + "{:?}: Failed to load any initial walk neighbors: {e:?}", network.get_local_peer(), - &e ); - e })?; any_neighbors diff --git a/stackslib/src/net/neighbors/mod.rs b/stackslib/src/net/neighbors/mod.rs index cc3fd73db8..f0d3cf18b7 100644 --- a/stackslib/src/net/neighbors/mod.rs +++ b/stackslib/src/net/neighbors/mod.rs @@ -388,11 +388,8 @@ impl PeerNetwork { inbound.join(", ") ); - match PeerDB::get_frontier_size(self.peerdb.conn()) { - Ok(count) => { - debug!("{:?}: Frontier table size: {}", &self.local_peer, count); - } - Err(_) => {} + if let Ok(count) = PeerDB::get_frontier_size(self.peerdb.conn()) { + debug!("{:?}: Frontier table size: {}", &self.local_peer, count); }; debug!("{:?}: Walk finished ===================", 
&self.local_peer); } diff --git a/stackslib/src/net/p2p.rs b/stackslib/src/net/p2p.rs index 737605c8c0..6e2e8ce461 100644 --- a/stackslib/src/net/p2p.rs +++ b/stackslib/src/net/p2p.rs @@ -1153,13 +1153,10 @@ impl PeerNetwork { ) -> u64 { let mut ret = 0; for (_, socket) in sockets.iter() { - match socket.peer_addr() { - Ok(addr) => { - if addr.ip() == ipaddr.ip() { - ret += 1; - } + if let Ok(addr) = socket.peer_addr() { + if addr.ip() == ipaddr.ip() { + ret += 1; } - Err(_) => {} }; } ret @@ -1386,12 +1383,9 @@ impl PeerNetwork { NetworkRequest::Ban(neighbor_keys) => { for neighbor_key in neighbor_keys.iter() { info!("Request to ban {:?}", neighbor_key); - match self.events.get(neighbor_key) { - Some(event_id) => { - debug!("Will ban {:?} (event {})", neighbor_key, event_id); - self.bans.insert(*event_id); - } - None => {} + if let Some(event_id) = self.events.get(neighbor_key) { + debug!("Will ban {:?} (event {})", neighbor_key, event_id); + self.bans.insert(*event_id); } } Ok(()) @@ -1474,28 +1468,25 @@ impl PeerNetwork { // receive all in-bound requests for i in 0..self.handles.len() { - match self.handles.get(i) { - Some(handle) => { - loop { - // drain all inbound requests - let inbound_request_res = handle.chan_in.try_recv(); - match inbound_request_res { - Ok(inbound_request) => { - messages.push((i, inbound_request)); - } - Err(TryRecvError::Empty) => { - // nothing to do - break; - } - Err(TryRecvError::Disconnected) => { - // dead; remove - to_remove.push(i); - break; - } + if let Some(handle) = self.handles.get(i) { + loop { + // drain all inbound requests + let inbound_request_res = handle.chan_in.try_recv(); + match inbound_request_res { + Ok(inbound_request) => { + messages.push((i, inbound_request)); + } + Err(TryRecvError::Empty) => { + // nothing to do + break; + } + Err(TryRecvError::Disconnected) => { + // dead; remove + to_remove.push(i); + break; } } } - None => {} } } @@ -1893,11 +1884,8 @@ impl PeerNetwork { /// Deregister a socket from our 
p2p network instance. fn deregister_socket(&mut self, event_id: usize, socket: mio_net::TcpStream) { - match self.network { - Some(ref mut network) => { - let _ = network.deregister(event_id, &socket); - } - None => {} + if let Some(ref mut network) = self.network { + let _ = network.deregister(event_id, &socket); } } @@ -1977,11 +1965,8 @@ impl PeerNetwork { /// Deregister and ban a neighbor pub fn deregister_and_ban_neighbor(&mut self, neighbor: &NeighborKey) { debug!("Disconnect from and ban {:?}", neighbor); - match self.events.get(neighbor) { - Some(event_id) => { - self.bans.insert(*event_id); - } - None => {} + if let Some(event_id) = self.events.get(neighbor) { + self.bans.insert(*event_id); } self.relayer_stats.process_neighbor_ban(neighbor); @@ -2700,22 +2685,16 @@ impl PeerNetwork { &self.local_peer.private_key, StacksMessageType::NatPunchRequest(nonce), ) - .map_err(|e| { - info!("Failed to sign NAT punch request: {:?}", &e); - e - })?; + .inspect_err(|e| info!("Failed to sign NAT punch request: {e:?}"))?; let mut rh = convo .send_signed_request(natpunch_request, self.connection_opts.timeout) - .map_err(|e| { - info!("Failed to send NAT punch request: {:?}", &e); - e - })?; + .inspect_err(|e| info!("Failed to send NAT punch request: {e:?}"))?; - self.saturate_p2p_socket(event_id, &mut rh).map_err(|e| { - info!("Failed to saturate NAT punch socket on event {}", &event_id); - e - })?; + self.saturate_p2p_socket(event_id, &mut rh) + .inspect_err(|_e| { + info!("Failed to saturate NAT punch socket on event {event_id}") + })?; self.public_ip_reply_handle = Some(rh); break; @@ -3685,13 +3664,11 @@ impl PeerNetwork { // always do block download let new_blocks = self .do_network_block_sync_nakamoto(burnchain_height, sortdb, chainstate, ibd) - .map_err(|e| { + .inspect_err(|e| { warn!( - "{:?}: Failed to perform Nakamoto block sync: {:?}", - &self.get_local_peer(), - &e - ); - e + "{:?}: Failed to perform Nakamoto block sync: {e:?}", + &self.get_local_peer() + ) 
}) .unwrap_or_default(); @@ -4425,13 +4402,7 @@ impl PeerNetwork { sortdb, &OnChainRewardSetProvider::new(), ) - .map_err(|e| { - warn!( - "Failed to load reward cycle info for cycle {}: {:?}", - rc, &e - ); - e - }) + .inspect_err(|e| warn!("Failed to load reward cycle info for cycle {rc}: {e:?}")) .unwrap_or(None) else { continue; }; diff --git a/stackslib/src/net/prune.rs b/stackslib/src/net/prune.rs index 1ad6b4ffc8..ac9cb361e5 100644 --- a/stackslib/src/net/prune.rs +++ b/stackslib/src/net/prune.rs @@ -322,18 +322,15 @@ impl PeerNetwork { if preserve.contains(event_id) { continue; } - match self.peers.get(event_id) { - Some(convo) => { - if !convo.stats.outbound { - let stats = convo.stats.clone(); - if let Some(entry) = ip_neighbor.get_mut(&nk.addrbytes) { - entry.push((*event_id, nk.clone(), stats)); - } else { - ip_neighbor.insert(nk.addrbytes, vec![(*event_id, nk.clone(), stats)]); - } + if let Some(convo) = self.peers.get(event_id) { + if !convo.stats.outbound { + let stats = convo.stats.clone(); + if let Some(entry) = ip_neighbor.get_mut(&nk.addrbytes) { + entry.push((*event_id, nk.clone(), stats)); + } else { + ip_neighbor.insert(nk.addrbytes, vec![(*event_id, nk.clone(), stats)]); } } - None => {} } } @@ -378,15 +375,12 @@ impl PeerNetwork { let mut outbound: Vec = vec![]; for (nk, event_id) in self.events.iter() { - match self.peers.get(event_id) { - Some(convo) => { - if convo.stats.outbound { - outbound.push(format!("{:?}", &nk)); - } else { - inbound.push(format!("{:?}", &nk)); - } + if let Some(convo) = self.peers.get(event_id) { + if convo.stats.outbound { + outbound.push(format!("{:?}", &nk)); + } else { + inbound.push(format!("{:?}", &nk)); } - None => {} } } (inbound, outbound) @@ -464,11 +458,8 @@ impl PeerNetwork { inbound.join(", ") ); - match PeerDB::get_frontier_size(self.peerdb.conn()) { - Ok(count) => { - debug!("{:?}: Frontier size: {}", &self.local_peer, count); - } - Err(_) => {} + if let Ok(count) = 
PeerDB::get_frontier_size(self.peerdb.conn()) { + debug!("{:?}: Frontier size: {}", &self.local_peer, count); }; } } diff --git a/stackslib/src/net/relay.rs b/stackslib/src/net/relay.rs index 4e4acb3f8a..a3f0117c4a 100644 --- a/stackslib/src/net/relay.rs +++ b/stackslib/src/net/relay.rs @@ -949,14 +949,12 @@ impl Relayer { if chainstate .nakamoto_blocks_db() .has_nakamoto_block_with_index_hash(&block.header.block_id()) - .map_err(|e| { + .inspect_err(|e| { warn!( - "Failed to determine if we have Nakamoto block {}/{}: {:?}", + "Failed to determine if we have Nakamoto block {}/{}: {e:?}", &block.header.consensus_hash, - &block.header.block_hash(), - &e + &block.header.block_hash() ); - e })? { if force_broadcast { @@ -1823,52 +1821,49 @@ impl Relayer { &tx.txid(), &ast_rules ); - match tx.payload { - TransactionPayload::SmartContract(ref smart_contract, ref clarity_version_opt) => { - let clarity_version = - clarity_version_opt.unwrap_or(ClarityVersion::default_for_epoch(epoch_id)); - - if ast_rules == ASTRules::PrecheckSize { - let origin = tx.get_origin(); - let issuer_principal = { - let addr = if mainnet { - origin.address_mainnet() - } else { - origin.address_testnet() - }; - addr.to_account_principal() - }; - let issuer_principal = if let PrincipalData::Standard(data) = issuer_principal { - data + if let TransactionPayload::SmartContract(ref smart_contract, ref clarity_version_opt) = + tx.payload + { + let clarity_version = + clarity_version_opt.unwrap_or(ClarityVersion::default_for_epoch(epoch_id)); + + if ast_rules == ASTRules::PrecheckSize { + let origin = tx.get_origin(); + let issuer_principal = { + let addr = if mainnet { + origin.address_mainnet() } else { - // not possible - panic!("Transaction had a contract principal origin"); + origin.address_testnet() }; + addr.to_account_principal() + }; + let issuer_principal = if let PrincipalData::Standard(data) = issuer_principal { + data + } else { + // not possible + panic!("Transaction had a contract 
principal origin"); + }; - let contract_id = QualifiedContractIdentifier::new( - issuer_principal, - smart_contract.name.clone(), - ); - let contract_code_str = smart_contract.code_body.to_string(); - - // make sure that the AST isn't unreasonably big - let ast_res = - ast_check_size(&contract_id, &contract_code_str, clarity_version, epoch_id); - match ast_res { - Ok(_) => {} - Err(parse_error) => match parse_error.err { - ParseErrors::ExpressionStackDepthTooDeep - | ParseErrors::VaryExpressionStackDepthTooDeep => { - // don't include this block - info!("Transaction {} is problematic and will not be included, relayed, or built upon", &tx.txid()); - return Err(Error::ClarityError(parse_error.into())); - } - _ => {} - }, - } + let contract_id = + QualifiedContractIdentifier::new(issuer_principal, smart_contract.name.clone()); + let contract_code_str = smart_contract.code_body.to_string(); + + // make sure that the AST isn't unreasonably big + let ast_res = + ast_check_size(&contract_id, &contract_code_str, clarity_version, epoch_id); + match ast_res { + Ok(_) => {} + Err(parse_error) => match parse_error.err { + ParseErrors::ExpressionStackDepthTooDeep + | ParseErrors::VaryExpressionStackDepthTooDeep => { + // don't include this block + info!("Transaction {} is problematic and will not be included, relayed, or built upon", &tx.txid()); + return Err(Error::ClarityError(parse_error.into())); + } + _ => {} + }, } } - _ => {} } Ok(()) } @@ -3135,21 +3130,22 @@ impl PeerNetwork { Ok(m) => m, Err(e) => { warn!( - "{:?}: Failed to sign for {:?}: {:?}", - &self.local_peer, recipient, &e + "{:?}: Failed to sign for {recipient:?}: {e:?}", + &self.local_peer ); continue; } }; // absorb errors - let _ = self.relay_signed_message(recipient, message).map_err(|e| { - warn!( - "{:?}: Failed to announce {} entries to {:?}: {:?}", - &self.local_peer, num_blocks, recipient, &e - ); - e - }); + let _ = self + .relay_signed_message(recipient, message) + .inspect_err(|e| { + warn!( + 
"{:?}: Failed to announce {num_blocks} entries to {recipient:?}: {e:?}", + &self.local_peer + ); + }); } } @@ -3170,26 +3166,27 @@ impl PeerNetwork { Ok(m) => m, Err(e) => { warn!( - "{:?}: Failed to sign for {:?}: {:?}", - &self.local_peer, recipient, &e + "{:?}: Failed to sign for {recipient:?}: {e:?}", + &self.local_peer ); return; } }; debug!( - "{:?}: Push block {}/{} to {:?}", - &self.local_peer, &ch, &blk_hash, recipient + "{:?}: Push block {ch}/{blk_hash} to {recipient:?}", + &self.local_peer ); // absorb errors - let _ = self.relay_signed_message(recipient, message).map_err(|e| { - warn!( - "{:?}: Failed to push block {}/{} to {:?}: {:?}", - &self.local_peer, &ch, &blk_hash, recipient, &e - ); - e - }); + let _ = self + .relay_signed_message(recipient, message) + .inspect_err(|e| { + warn!( + "{:?}: Failed to push block {ch}/{blk_hash} to {recipient:?}: {e:?}", + &self.local_peer + ) + }); } /// Try to push a confirmed microblock stream to a peer. @@ -3210,26 +3207,27 @@ impl PeerNetwork { Ok(m) => m, Err(e) => { warn!( - "{:?}: Failed to sign for {:?}: {:?}", - &self.local_peer, recipient, &e + "{:?}: Failed to sign for {recipient:?}: {e:?}", + &self.local_peer ); return; } }; debug!( - "{:?}: Push microblocks for {} to {:?}", - &self.local_peer, &idx_bhh, recipient + "{:?}: Push microblocks for {idx_bhh} to {recipient:?}", + &self.local_peer ); // absorb errors - let _ = self.relay_signed_message(recipient, message).map_err(|e| { - warn!( - "{:?}: Failed to push microblocks for {} to {:?}: {:?}", - &self.local_peer, &idx_bhh, recipient, &e - ); - e - }); + let _ = self + .relay_signed_message(recipient, message) + .inspect_err(|e| { + warn!( + "{:?}: Failed to push microblocks for {idx_bhh} to {recipient:?}: {e:?}", + &self.local_peer + ); + }); } /// Announce blocks that we have to an outbound peer that doesn't have them. 
diff --git a/stackslib/src/net/server.rs b/stackslib/src/net/server.rs index 7370f9cdf0..1df56c299b 100644 --- a/stackslib/src/net/server.rs +++ b/stackslib/src/net/server.rs @@ -429,56 +429,52 @@ impl HttpPeer { // get incoming bytes and update the state of this conversation. let mut convo_dead = false; let recv_res = convo.recv(client_sock); - match recv_res { - Err(e) => { - match e { - net_error::PermanentlyDrained => { - // socket got closed, but we might still have pending unsolicited messages - debug!( - "Remote HTTP peer disconnected event {} (socket {:?})", - event_id, &client_sock - ); - convo_dead = true; - } - net_error::InvalidMessage => { - // got sent bad data. If this was an inbound conversation, send it a HTTP - // 400 and close the socket. - debug!("Got a bad HTTP message on socket {:?}", &client_sock); - match convo.reply_error(StacksHttpResponse::new_empty_error( - &HttpBadRequest::new( - "Received an HTTP message that the node could not decode" - .to_string(), - ), - )) { - Ok(_) => { - // prime the socket - if let Err(e) = HttpPeer::saturate_http_socket(client_sock, convo) { - debug!( - "Failed to flush HTTP 400 to socket {:?}: {:?}", - &client_sock, &e - ); - // convo_dead = true; - } - } - Err(e) => { + if let Err(e) = recv_res { + match e { + net_error::PermanentlyDrained => { + // socket got closed, but we might still have pending unsolicited messages + debug!( + "Remote HTTP peer disconnected event {} (socket {:?})", + event_id, &client_sock + ); + convo_dead = true; + } + net_error::InvalidMessage => { + // got sent bad data. If this was an inbound conversation, send it a HTTP + // 400 and close the socket. 
+ debug!("Got a bad HTTP message on socket {:?}", &client_sock); + match convo.reply_error(StacksHttpResponse::new_empty_error( + &HttpBadRequest::new( + "Received an HTTP message that the node could not decode".to_string(), + ), + )) { + Ok(_) => { + // prime the socket + if let Err(e) = HttpPeer::saturate_http_socket(client_sock, convo) { debug!( - "Failed to reply HTTP 400 to socket {:?}: {:?}", + "Failed to flush HTTP 400 to socket {:?}: {:?}", &client_sock, &e ); - convo_dead = true; + // convo_dead = true; } } + Err(e) => { + debug!( + "Failed to reply HTTP 400 to socket {:?}: {:?}", + &client_sock, &e + ); + convo_dead = true; + } } - _ => { - debug!( - "Failed to receive HTTP data on event {} (socket {:?}): {:?}", - event_id, &client_sock, &e - ); - convo_dead = true; - } + } + _ => { + debug!( + "Failed to receive HTTP data on event {} (socket {:?}): {:?}", + event_id, &client_sock, &e + ); + convo_dead = true; } } - Ok(_) => {} } // react to inbound messages -- do we need to send something out, or fulfill requests @@ -730,11 +726,8 @@ mod test { peer.step().unwrap(); // asked to yield? 
- match http_rx.try_recv() { - Ok(_) => { - break; - } - Err(_) => {} + if http_rx.try_recv().is_ok() { + break; } } diff --git a/stackslib/src/net/tests/convergence.rs b/stackslib/src/net/tests/convergence.rs index a6b4a83df5..ae81703c53 100644 --- a/stackslib/src/net/tests/convergence.rs +++ b/stackslib/src/net/tests/convergence.rs @@ -218,7 +218,7 @@ fn test_walk_ring_15_org_biased() { let peers = test_walk_ring(&mut peer_configs); for i in 1..peer_count { - match PeerDB::get_peer( + if let Some(p) = PeerDB::get_peer( peers[i].network.peerdb.conn(), peer_0.addr.network_id, &peer_0.addr.addrbytes, @@ -226,11 +226,8 @@ fn test_walk_ring_15_org_biased() { ) .unwrap() { - Some(p) => { - assert_eq!(p.asn, 1); - assert_eq!(p.org, 1); - } - None => {} + assert_eq!(p.asn, 1); + assert_eq!(p.org, 1); } } @@ -398,7 +395,7 @@ fn test_walk_line_15_org_biased() { let peers = test_walk_line(&mut peer_configs); for i in 1..peer_count { - match PeerDB::get_peer( + if let Some(p) = PeerDB::get_peer( peers[i].network.peerdb.conn(), peer_0.addr.network_id, &peer_0.addr.addrbytes, @@ -406,11 +403,8 @@ fn test_walk_line_15_org_biased() { ) .unwrap() { - Some(p) => { - assert_eq!(p.asn, 1); - assert_eq!(p.org, 1); - } - None => {} + assert_eq!(p.asn, 1); + assert_eq!(p.org, 1); } } @@ -634,7 +628,7 @@ fn test_walk_star_15_org_biased() { let peers = test_walk_star(&mut peer_configs); for i in 1..peer_count { - match PeerDB::get_peer( + if let Some(p) = PeerDB::get_peer( peers[i].network.peerdb.conn(), peer_0.addr.network_id, &peer_0.addr.addrbytes, @@ -642,11 +636,8 @@ fn test_walk_star_15_org_biased() { ) .unwrap() { - Some(p) => { - assert_eq!(p.asn, 1); - assert_eq!(p.org, 1); - } - None => {} + assert_eq!(p.asn, 1); + assert_eq!(p.org, 1); } } @@ -849,14 +840,11 @@ fn dump_peers(peers: &[TestPeer]) { let stats_opt = peers[i] .network .get_neighbor_stats(&peers[j].to_neighbor().addr); - match stats_opt { - Some(stats) => { - neighbor_index.push(j); - if stats.outbound { - 
outbound_neighbor_index.push(j); - } + if let Some(stats) = stats_opt { + neighbor_index.push(j); + if stats.outbound { + outbound_neighbor_index.push(j); } - None => {} } } @@ -879,16 +867,13 @@ fn dump_peer_histograms(peers: &[TestPeer]) { let stats_opt = peers[i] .network .get_neighbor_stats(&peers[j].to_neighbor().addr); - match stats_opt { - Some(stats) => { - neighbor_index.push(j); - if stats.outbound { - outbound_neighbor_index.push(j); - } else { - inbound_neighbor_index.push(j); - } + if let Some(stats) = stats_opt { + neighbor_index.push(j); + if stats.outbound { + outbound_neighbor_index.push(j); + } else { + inbound_neighbor_index.push(j); } - None => {} } } for inbound in inbound_neighbor_index.iter() { @@ -998,32 +983,26 @@ fn run_topology_test_ex( debug!("Step peer {:?}", &nk); // allowed peers are still connected - match initial_allowed.get(&nk) { - Some(peer_list) => { - for pnk in peer_list.iter() { - if !peers[i].network.events.contains_key(&pnk.clone()) { - error!( - "{:?}: Perma-allowed peer {:?} not connected anymore", - &nk, &pnk - ); - assert!(false); - } + if let Some(peer_list) = initial_allowed.get(&nk) { + for pnk in peer_list.iter() { + if !peers[i].network.events.contains_key(&pnk.clone()) { + error!( + "{:?}: Perma-allowed peer {:?} not connected anymore", + &nk, &pnk + ); + assert!(false); } } - None => {} }; // denied peers are never connected - match initial_denied.get(&nk) { - Some(peer_list) => { - for pnk in peer_list.iter() { - if peers[i].network.events.contains_key(&pnk.clone()) { - error!("{:?}: Perma-denied peer {:?} connected", &nk, &pnk); - assert!(false); - } + if let Some(peer_list) = initial_denied.get(&nk) { + for pnk in peer_list.iter() { + if peers[i].network.events.contains_key(&pnk.clone()) { + error!("{:?}: Perma-denied peer {:?} connected", &nk, &pnk); + assert!(false); } } - None => {} }; // all ports are unique in the p2p socket table diff --git a/stackslib/src/net/tests/download/epoch2x.rs 
b/stackslib/src/net/tests/download/epoch2x.rs index 9d0bdbc6b7..50ec1b1c03 100644 --- a/stackslib/src/net/tests/download/epoch2x.rs +++ b/stackslib/src/net/tests/download/epoch2x.rs @@ -171,20 +171,14 @@ fn test_get_block_availability() { }; // nothing should break - match peer_1.network.inv_state { - Some(ref inv) => { - assert_eq!(inv.get_broken_peers().len(), 0); - assert_eq!(inv.get_diverged_peers().len(), 0); - } - None => {} + if let Some(ref inv) = peer_1.network.inv_state { + assert_eq!(inv.get_broken_peers().len(), 0); + assert_eq!(inv.get_diverged_peers().len(), 0); } - match peer_2.network.inv_state { - Some(ref inv) => { - assert_eq!(inv.get_broken_peers().len(), 0); - assert_eq!(inv.get_diverged_peers().len(), 0); - } - None => {} + if let Some(ref inv) = peer_2.network.inv_state { + assert_eq!(inv.get_broken_peers().len(), 0); + assert_eq!(inv.get_diverged_peers().len(), 0); } round += 1; @@ -216,10 +210,10 @@ fn test_get_block_availability() { }) } -fn get_blocks_inventory(peer: &mut TestPeer, start_height: u64, end_height: u64) -> BlocksInvData { +fn get_blocks_inventory(peer: &TestPeer, start_height: u64, end_height: u64) -> BlocksInvData { let block_hashes = { let num_headers = end_height - start_height; - let ic = peer.sortdb.as_mut().unwrap().index_conn(); + let ic = peer.sortdb.as_ref().unwrap().index_conn(); let tip = SortitionDB::get_canonical_burn_chain_tip(&ic).unwrap(); let ancestor = SortitionDB::get_ancestor_snapshot(&ic, end_height, &tip.sortition_id) .unwrap() @@ -233,7 +227,7 @@ fn get_blocks_inventory(peer: &mut TestPeer, start_height: u64, end_height: u64) }; let inv = peer - .chainstate() + .chainstate_ref() .get_blocks_inventory(&block_hashes) .unwrap(); inv @@ -471,11 +465,7 @@ where info!("Completed walk round {} step(s)", round); - let mut peer_invs = vec![]; for peer in peers.iter_mut() { - let peer_inv = get_blocks_inventory(peer, 0, num_burn_blocks); - peer_invs.push(peer_inv); - let availability = get_peer_availability( 
peer, first_stacks_block_height - first_sortition_height, @@ -562,12 +552,9 @@ pub fn test_get_blocks_and_microblocks_2_peers_download_plain() { |peer| { // check peer health // nothing should break - match peer.network.block_downloader { - Some(ref dl) => { - assert_eq!(dl.broken_peers.len(), 0); - assert_eq!(dl.dead_peers.len(), 0); - } - None => {} + if let Some(ref dl) = peer.network.block_downloader { + assert_eq!(dl.broken_peers.len(), 0); + assert_eq!(dl.dead_peers.len(), 0); } // no block advertisements (should be disabled) @@ -843,12 +830,9 @@ pub fn test_get_blocks_and_microblocks_2_peers_download_plain_100_blocks() { |peer| { // check peer health // nothing should break - match peer.network.block_downloader { - Some(ref dl) => { - assert_eq!(dl.broken_peers.len(), 0); - assert_eq!(dl.dead_peers.len(), 0); - } - None => {} + if let Some(ref dl) = peer.network.block_downloader { + assert_eq!(dl.broken_peers.len(), 0); + assert_eq!(dl.dead_peers.len(), 0); } // no block advertisements (should be disabled) @@ -934,12 +918,9 @@ pub fn test_get_blocks_and_microblocks_5_peers_star() { |peer| { // check peer health // nothing should break - match peer.network.block_downloader { - Some(ref dl) => { - assert_eq!(dl.broken_peers.len(), 0); - assert_eq!(dl.dead_peers.len(), 0); - } - None => {} + if let Some(ref dl) = peer.network.block_downloader { + assert_eq!(dl.broken_peers.len(), 0); + assert_eq!(dl.dead_peers.len(), 0); } true }, @@ -1008,12 +989,9 @@ pub fn test_get_blocks_and_microblocks_5_peers_line() { |peer| { // check peer health // nothing should break - match peer.network.block_downloader { - Some(ref dl) => { - assert_eq!(dl.broken_peers.len(), 0); - assert_eq!(dl.dead_peers.len(), 0); - } - None => {} + if let Some(ref dl) = peer.network.block_downloader { + assert_eq!(dl.broken_peers.len(), 0); + assert_eq!(dl.dead_peers.len(), 0); } true }, @@ -1090,12 +1068,9 @@ pub fn test_get_blocks_and_microblocks_overwhelmed_connections() { |peer| { // check 
peer health // nothing should break - match peer.network.block_downloader { - Some(ref dl) => { - assert_eq!(dl.broken_peers.len(), 0); - assert_eq!(dl.dead_peers.len(), 0); - } - None => {} + if let Some(ref dl) = peer.network.block_downloader { + assert_eq!(dl.broken_peers.len(), 0); + assert_eq!(dl.dead_peers.len(), 0); } true }, @@ -1253,11 +1228,8 @@ pub fn test_get_blocks_and_microblocks_ban_url() { |_| {}, |peer| { let mut blocked = 0; - match peer.network.block_downloader { - Some(ref dl) => { - blocked = dl.blocked_urls.len(); - } - None => {} + if let Some(ref dl) = peer.network.block_downloader { + blocked = dl.blocked_urls.len(); } if blocked >= 1 { // NOTE: this is the success criterion @@ -1474,12 +1446,9 @@ pub fn test_get_blocks_and_microblocks_2_peers_download_multiple_microblock_desc |peer| { // check peer health // nothing should break - match peer.network.block_downloader { - Some(ref dl) => { - assert_eq!(dl.broken_peers.len(), 0); - assert_eq!(dl.dead_peers.len(), 0); - } - None => {} + if let Some(ref dl) = peer.network.block_downloader { + assert_eq!(dl.broken_peers.len(), 0); + assert_eq!(dl.dead_peers.len(), 0); } // no block advertisements (should be disabled) diff --git a/stackslib/src/net/tests/inv/epoch2x.rs b/stackslib/src/net/tests/inv/epoch2x.rs index d6d7d9557a..949c9ad383 100644 --- a/stackslib/src/net/tests/inv/epoch2x.rs +++ b/stackslib/src/net/tests/inv/epoch2x.rs @@ -1390,22 +1390,16 @@ fn test_sync_inv_2_peers_plain() { }; // nothing should break - match peer_1.network.inv_state { - Some(ref inv) => { - assert_eq!(inv.get_broken_peers().len(), 0); - assert_eq!(inv.get_dead_peers().len(), 0); - assert_eq!(inv.get_diverged_peers().len(), 0); - } - None => {} + if let Some(ref inv) = peer_1.network.inv_state { + assert_eq!(inv.get_broken_peers().len(), 0); + assert_eq!(inv.get_dead_peers().len(), 0); + assert_eq!(inv.get_diverged_peers().len(), 0); } - match peer_2.network.inv_state { - Some(ref inv) => { - 
assert_eq!(inv.get_broken_peers().len(), 0); - assert_eq!(inv.get_dead_peers().len(), 0); - assert_eq!(inv.get_diverged_peers().len(), 0); - } - None => {} + if let Some(ref inv) = peer_2.network.inv_state { + assert_eq!(inv.get_broken_peers().len(), 0); + assert_eq!(inv.get_dead_peers().len(), 0); + assert_eq!(inv.get_diverged_peers().len(), 0); } round += 1; @@ -1553,46 +1547,38 @@ fn test_sync_inv_2_peers_stale() { None => 0, }; - match peer_1.network.inv_state { - Some(ref inv) => { - info!("Peer 1 stats: {:?}", &inv.block_stats); - assert_eq!(inv.get_broken_peers().len(), 0); - assert_eq!(inv.get_dead_peers().len(), 0); - assert_eq!(inv.get_diverged_peers().len(), 0); - - if let Some(peer_2_inv) = inv.block_stats.get(&peer_2.to_neighbor().addr) { - if peer_2_inv.inv.num_sortitions - == first_stacks_block_height - - peer_1.config.burnchain.first_block_height - { - for i in 0..first_stacks_block_height { - assert!(!peer_2_inv.inv.has_ith_block(i)); - assert!(!peer_2_inv.inv.has_ith_microblock_stream(i)); - } - peer_2_check = true; + if let Some(ref inv) = peer_1.network.inv_state { + info!("Peer 1 stats: {:?}", &inv.block_stats); + assert_eq!(inv.get_broken_peers().len(), 0); + assert_eq!(inv.get_dead_peers().len(), 0); + assert_eq!(inv.get_diverged_peers().len(), 0); + + if let Some(peer_2_inv) = inv.block_stats.get(&peer_2.to_neighbor().addr) { + if peer_2_inv.inv.num_sortitions + == first_stacks_block_height - peer_1.config.burnchain.first_block_height + { + for i in 0..first_stacks_block_height { + assert!(!peer_2_inv.inv.has_ith_block(i)); + assert!(!peer_2_inv.inv.has_ith_microblock_stream(i)); } + peer_2_check = true; } } - None => {} } - match peer_2.network.inv_state { - Some(ref inv) => { - info!("Peer 2 stats: {:?}", &inv.block_stats); - assert_eq!(inv.get_broken_peers().len(), 0); - assert_eq!(inv.get_dead_peers().len(), 0); - assert_eq!(inv.get_diverged_peers().len(), 0); - - if let Some(peer_1_inv) = inv.block_stats.get(&peer_1.to_neighbor().addr) 
{ - if peer_1_inv.inv.num_sortitions - == first_stacks_block_height - - peer_1.config.burnchain.first_block_height - { - peer_1_check = true; - } + if let Some(ref inv) = peer_2.network.inv_state { + info!("Peer 2 stats: {:?}", &inv.block_stats); + assert_eq!(inv.get_broken_peers().len(), 0); + assert_eq!(inv.get_dead_peers().len(), 0); + assert_eq!(inv.get_diverged_peers().len(), 0); + + if let Some(peer_1_inv) = inv.block_stats.get(&peer_1.to_neighbor().addr) { + if peer_1_inv.inv.num_sortitions + == first_stacks_block_height - peer_1.config.burnchain.first_block_height + { + peer_1_check = true; } } - None => {} } round += 1; @@ -1703,54 +1689,48 @@ fn test_sync_inv_2_peers_unstable() { None => 0, }; - match peer_1.network.inv_state { - Some(ref inv) => { - info!("Peer 1 stats: {:?}", &inv.block_stats); - assert_eq!(inv.get_broken_peers().len(), 0); - assert_eq!(inv.get_dead_peers().len(), 0); - assert_eq!(inv.get_diverged_peers().len(), 0); + if let Some(ref inv) = peer_1.network.inv_state { + info!("Peer 1 stats: {:?}", &inv.block_stats); + assert_eq!(inv.get_broken_peers().len(), 0); + assert_eq!(inv.get_dead_peers().len(), 0); + assert_eq!(inv.get_diverged_peers().len(), 0); - if let Some(stats) = inv.get_stats(&peer_2.to_neighbor().addr) { - if stats.target_pox_reward_cycle > 0 { - peer_1_pox_cycle_start = true; - } - if stats.target_block_reward_cycle > 0 { - peer_1_block_cycle_start = true; - } - if stats.target_pox_reward_cycle == 0 && peer_1_pox_cycle_start { - peer_1_pox_cycle = true; - } - if stats.target_block_reward_cycle == 0 && peer_1_block_cycle_start { - peer_1_block_cycle = true; - } + if let Some(stats) = inv.get_stats(&peer_2.to_neighbor().addr) { + if stats.target_pox_reward_cycle > 0 { + peer_1_pox_cycle_start = true; + } + if stats.target_block_reward_cycle > 0 { + peer_1_block_cycle_start = true; + } + if stats.target_pox_reward_cycle == 0 && peer_1_pox_cycle_start { + peer_1_pox_cycle = true; + } + if stats.target_block_reward_cycle == 0 
&& peer_1_block_cycle_start { + peer_1_block_cycle = true; } } - None => {} } - match peer_2.network.inv_state { - Some(ref inv) => { - info!("Peer 2 stats: {:?}", &inv.block_stats); - assert_eq!(inv.get_broken_peers().len(), 0); - assert_eq!(inv.get_dead_peers().len(), 0); - assert_eq!(inv.get_diverged_peers().len(), 0); + if let Some(ref inv) = peer_2.network.inv_state { + info!("Peer 2 stats: {:?}", &inv.block_stats); + assert_eq!(inv.get_broken_peers().len(), 0); + assert_eq!(inv.get_dead_peers().len(), 0); + assert_eq!(inv.get_diverged_peers().len(), 0); - if let Some(stats) = inv.get_stats(&peer_1.to_neighbor().addr) { - if stats.target_pox_reward_cycle > 0 { - peer_2_pox_cycle_start = true; - } - if stats.target_block_reward_cycle > 0 { - peer_2_block_cycle_start = true; - } - if stats.target_pox_reward_cycle == 0 && peer_2_pox_cycle_start { - peer_2_pox_cycle = true; - } - if stats.target_block_reward_cycle == 0 && peer_2_block_cycle_start { - peer_2_block_cycle = true; - } + if let Some(stats) = inv.get_stats(&peer_1.to_neighbor().addr) { + if stats.target_pox_reward_cycle > 0 { + peer_2_pox_cycle_start = true; + } + if stats.target_block_reward_cycle > 0 { + peer_2_block_cycle_start = true; + } + if stats.target_pox_reward_cycle == 0 && peer_2_pox_cycle_start { + peer_2_pox_cycle = true; + } + if stats.target_block_reward_cycle == 0 && peer_2_block_cycle_start { + peer_2_block_cycle = true; } } - None => {} } round += 1; @@ -1917,42 +1897,30 @@ fn test_sync_inv_2_peers_different_pox_vectors() { let _ = peer_2.step(); // peer 1 should see that peer 2 has all blocks for reward cycles 5 through 9 - match peer_1.network.inv_state { - Some(ref inv) => { - inv_1_count = inv.get_inv_num_blocks(&peer_2.to_neighbor().addr); - peer_1_sorts = inv.get_inv_sortitions(&peer_2.to_neighbor().addr); - } - None => {} + if let Some(ref inv) = peer_1.network.inv_state { + inv_1_count = inv.get_inv_num_blocks(&peer_2.to_neighbor().addr); + peer_1_sorts = 
inv.get_inv_sortitions(&peer_2.to_neighbor().addr); }; // peer 2 should see that peer 1 has all blocks up to where we stopped feeding them to // it - match peer_2.network.inv_state { - Some(ref inv) => { - inv_2_count = inv.get_inv_num_blocks(&peer_1.to_neighbor().addr); - peer_2_sorts = inv.get_inv_sortitions(&peer_1.to_neighbor().addr); - } - None => {} + if let Some(ref inv) = peer_2.network.inv_state { + inv_2_count = inv.get_inv_num_blocks(&peer_1.to_neighbor().addr); + peer_2_sorts = inv.get_inv_sortitions(&peer_1.to_neighbor().addr); }; - match peer_1.network.inv_state { - Some(ref inv) => { - info!("Peer 1 stats: {:?}", &inv.block_stats); - assert_eq!(inv.get_broken_peers().len(), 0); - assert_eq!(inv.get_dead_peers().len(), 0); - assert_eq!(inv.get_diverged_peers().len(), 0); - } - None => {} + if let Some(ref inv) = peer_1.network.inv_state { + info!("Peer 1 stats: {:?}", &inv.block_stats); + assert_eq!(inv.get_broken_peers().len(), 0); + assert_eq!(inv.get_dead_peers().len(), 0); + assert_eq!(inv.get_diverged_peers().len(), 0); } - match peer_2.network.inv_state { - Some(ref inv) => { - info!("Peer 2 stats: {:?}", &inv.block_stats); - assert_eq!(inv.get_broken_peers().len(), 0); - assert_eq!(inv.get_dead_peers().len(), 0); - assert_eq!(inv.get_diverged_peers().len(), 0); - } - None => {} + if let Some(ref inv) = peer_2.network.inv_state { + info!("Peer 2 stats: {:?}", &inv.block_stats); + assert_eq!(inv.get_broken_peers().len(), 0); + assert_eq!(inv.get_dead_peers().len(), 0); + assert_eq!(inv.get_diverged_peers().len(), 0); } round += 1; diff --git a/stackslib/src/net/tests/inv/nakamoto.rs b/stackslib/src/net/tests/inv/nakamoto.rs index a2483fa052..220c671f0c 100644 --- a/stackslib/src/net/tests/inv/nakamoto.rs +++ b/stackslib/src/net/tests/inv/nakamoto.rs @@ -142,7 +142,7 @@ pub fn peer_get_nakamoto_invs<'a>( loop { peer.step_with_ibd(false).unwrap(); - if let Ok(..) 
= shutdown_recv.try_recv() { + if shutdown_recv.try_recv().is_ok() { break; } } @@ -1085,22 +1085,16 @@ fn test_nakamoto_inv_sync_across_epoch_change() { .unwrap_or(0); // nothing should break - match peer.network.inv_state { - Some(ref inv) => { - assert_eq!(inv.get_broken_peers().len(), 0); - assert_eq!(inv.get_dead_peers().len(), 0); - assert_eq!(inv.get_diverged_peers().len(), 0); - } - None => {} + if let Some(ref inv) = peer.network.inv_state { + assert_eq!(inv.get_broken_peers().len(), 0); + assert_eq!(inv.get_dead_peers().len(), 0); + assert_eq!(inv.get_diverged_peers().len(), 0); } - match other_peer.network.inv_state { - Some(ref inv) => { - assert_eq!(inv.get_broken_peers().len(), 0); - assert_eq!(inv.get_dead_peers().len(), 0); - assert_eq!(inv.get_diverged_peers().len(), 0); - } - None => {} + if let Some(ref inv) = other_peer.network.inv_state { + assert_eq!(inv.get_broken_peers().len(), 0); + assert_eq!(inv.get_dead_peers().len(), 0); + assert_eq!(inv.get_diverged_peers().len(), 0); } round += 1; diff --git a/stackslib/src/net/tests/mempool/mod.rs b/stackslib/src/net/tests/mempool/mod.rs index 700a64a739..9576ae7e54 100644 --- a/stackslib/src/net/tests/mempool/mod.rs +++ b/stackslib/src/net/tests/mempool/mod.rs @@ -757,7 +757,6 @@ fn test_mempool_sync_2_peers_problematic() { let stacks_tip_bhh = peer_1.network.stacks_tip.block_hash.clone(); // fill peer 1 with lots of transactions - let mut txs = HashMap::new(); let mut peer_1_mempool = peer_1.mempool.take().unwrap(); let mut mempool_tx = peer_1_mempool.tx_begin().unwrap(); for i in 0..num_txs { @@ -784,8 +783,6 @@ fn test_mempool_sync_2_peers_problematic() { let sponsor_nonce = tx.get_sponsor_nonce().unwrap_or(origin_nonce); let tx_fee = tx.get_tx_fee(); - txs.insert(tx.txid(), tx.clone()); - // should succeed MemPoolDB::try_add_tx( &mut mempool_tx, @@ -805,7 +802,7 @@ fn test_mempool_sync_2_peers_problematic() { ) .unwrap(); - eprintln!("Added {} {}", i, &txid); + eprintln!("Added {i} {txid}"); } 
mempool_tx.commit().unwrap(); peer_1.mempool = Some(peer_1_mempool); diff --git a/stackslib/src/net/tests/mod.rs b/stackslib/src/net/tests/mod.rs index f21aba3cad..c4684acf14 100644 --- a/stackslib/src/net/tests/mod.rs +++ b/stackslib/src/net/tests/mod.rs @@ -675,7 +675,6 @@ impl NakamotoBootPlan { let mut all_blocks = vec![]; let mut malleablized_block_ids = HashSet::new(); - let mut consensus_hashes = vec![]; let mut last_tenure_change: Option = None; let mut blocks_since_last_tenure = 0; @@ -757,7 +756,6 @@ impl NakamotoBootPlan { }); peer.refresh_burnchain_view(); - consensus_hashes.push(next_consensus_hash); let blocks: Vec = blocks_and_sizes .into_iter() @@ -858,7 +856,6 @@ impl NakamotoBootPlan { }); peer.refresh_burnchain_view(); - consensus_hashes.push(consensus_hash); let blocks: Vec = blocks_and_sizes .into_iter() .map(|(block, _, _)| block) @@ -954,14 +951,13 @@ impl NakamotoBootPlan { // each transaction was mined in the same order as described in the boot plan, // and it succeeded. - let mut burn_receipts = vec![]; let mut stacks_receipts = vec![]; for receipt in observed_block.receipts.iter() { match &receipt.transaction { TransactionOrigin::Stacks(..) => { stacks_receipts.push(receipt); } - TransactionOrigin::Burn(..) => burn_receipts.push(receipt), + TransactionOrigin::Burn(..) 
=> {} } } diff --git a/stackslib/src/net/tests/neighbors.rs b/stackslib/src/net/tests/neighbors.rs index f1e3fa76cb..8c56b48b0d 100644 --- a/stackslib/src/net/tests/neighbors.rs +++ b/stackslib/src/net/tests/neighbors.rs @@ -68,20 +68,14 @@ fn test_step_walk_1_neighbor_plain() { walk_2_count ); - match peer_1.network.walk { - Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); - } - None => {} + if let Some(ref w) = peer_1.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + assert_eq!(w.result.replaced_neighbors.len(), 0); }; - match peer_2.network.walk { - Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); - } - None => {} + if let Some(ref w) = peer_2.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + assert_eq!(w.result.replaced_neighbors.len(), 0); }; i += 1; @@ -184,22 +178,16 @@ fn test_step_walk_1_neighbor_plain_no_natpunch() { walk_2_count ); - match peer_1.network.walk { - Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.dead_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); - } - None => {} + if let Some(ref w) = peer_1.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + assert_eq!(w.result.dead_connections.len(), 0); + assert_eq!(w.result.replaced_neighbors.len(), 0); }; - match peer_2.network.walk { - Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.dead_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); - } - None => {} + if let Some(ref w) = peer_2.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + assert_eq!(w.result.dead_connections.len(), 0); + assert_eq!(w.result.replaced_neighbors.len(), 0); }; if let Some(s) = peer_1 @@ -306,20 +294,14 @@ fn test_step_walk_1_neighbor_denied() { walk_1_retries = 
peer_1.network.walk_retries; walk_2_retries = peer_2.network.walk_retries; - match peer_1.network.walk { - Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); - } - None => {} + if let Some(ref w) = peer_1.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + assert_eq!(w.result.replaced_neighbors.len(), 0); }; - match peer_2.network.walk { - Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); - } - None => {} + if let Some(ref w) = peer_2.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + assert_eq!(w.result.replaced_neighbors.len(), 0); }; i += 1; @@ -400,20 +382,14 @@ fn test_step_walk_1_neighbor_bad_epoch() { walk_1_retries = peer_1.network.walk_attempts; walk_2_retries = peer_2.network.walk_attempts; - match peer_1.network.walk { - Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); - } - None => {} + if let Some(ref w) = peer_1.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + assert_eq!(w.result.replaced_neighbors.len(), 0); }; - match peer_2.network.walk { - Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); - } - None => {} + if let Some(ref w) = peer_2.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + assert_eq!(w.result.replaced_neighbors.len(), 0); }; i += 1; @@ -463,20 +439,14 @@ fn test_step_walk_1_neighbor_heartbeat_ping() { walk_2_count ); - match peer_1.network.walk { - Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); - } - None => {} + if let Some(ref w) = peer_1.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + assert_eq!(w.result.replaced_neighbors.len(), 0); }; - match peer_2.network.walk { - Some(ref w) => { - 
assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); - } - None => {} + if let Some(ref w) = peer_2.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + assert_eq!(w.result.replaced_neighbors.len(), 0); }; i += 1; @@ -573,23 +543,17 @@ fn test_step_walk_1_neighbor_bootstrapping() { walk_2_count ); - match peer_1.network.walk { - Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); + if let Some(ref w) = peer_1.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + assert_eq!(w.result.replaced_neighbors.len(), 0); - // peer 2 never gets added to peer 1's frontier - assert!(!w.frontier.contains_key(&neighbor_2.addr)); - } - None => {} + // peer 2 never gets added to peer 1's frontier + assert!(!w.frontier.contains_key(&neighbor_2.addr)); }; - match peer_2.network.walk { - Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); - } - None => {} + if let Some(ref w) = peer_2.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + assert_eq!(w.result.replaced_neighbors.len(), 0); }; i += 1; @@ -657,23 +621,17 @@ fn test_step_walk_1_neighbor_behind() { walk_2_count ); - match peer_1.network.walk { - Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); - } - None => {} + if let Some(ref w) = peer_1.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + assert_eq!(w.result.replaced_neighbors.len(), 0); }; - match peer_2.network.walk { - Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); + if let Some(ref w) = peer_2.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + assert_eq!(w.result.replaced_neighbors.len(), 0); - // peer 1 never gets added to peer 2's frontier - 
assert!(!w.frontier.contains_key(&neighbor_1.addr)); - } - None => {} + // peer 1 never gets added to peer 2's frontier + assert!(!w.frontier.contains_key(&neighbor_1.addr)); }; i += 1; @@ -789,20 +747,14 @@ fn test_step_walk_10_neighbors_of_neighbor_plain() { walk_2_count ); - match peer_1.network.walk { - Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); - } - None => {} + if let Some(ref w) = peer_1.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + assert_eq!(w.result.replaced_neighbors.len(), 0); }; - match peer_2.network.walk { - Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); - } - None => {} + if let Some(ref w) = peer_2.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + assert_eq!(w.result.replaced_neighbors.len(), 0); }; i += 1; @@ -942,20 +894,14 @@ fn test_step_walk_10_neighbors_of_neighbor_bootstrapping() { walk_2_count ); - match peer_1.network.walk { - Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); - } - None => {} + if let Some(ref w) = peer_1.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + assert_eq!(w.result.replaced_neighbors.len(), 0); }; - match peer_2.network.walk { - Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); - } - None => {} + if let Some(ref w) = peer_2.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + assert_eq!(w.result.replaced_neighbors.len(), 0); }; steps += 1; @@ -1091,20 +1037,14 @@ fn test_step_walk_2_neighbors_plain() { walk_2_count ); - match peer_1.network.walk { - Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); - } - None => {} + if let Some(ref w) = peer_1.network.walk { + 
assert_eq!(w.result.broken_connections.len(), 0); + assert_eq!(w.result.replaced_neighbors.len(), 0); }; - match peer_2.network.walk { - Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); - } - None => {} + if let Some(ref w) = peer_2.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + assert_eq!(w.result.replaced_neighbors.len(), 0); }; i += 1; @@ -1371,28 +1311,19 @@ fn test_step_walk_3_neighbors_inbound() { ); test_debug!("========"); - match peer_1.network.walk { - Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); - } - None => {} + if let Some(ref w) = peer_1.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + assert_eq!(w.result.replaced_neighbors.len(), 0); }; - match peer_2.network.walk { - Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); - } - None => {} + if let Some(ref w) = peer_2.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + assert_eq!(w.result.replaced_neighbors.len(), 0); }; - match peer_3.network.walk { - Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); - } - None => {} + if let Some(ref w) = peer_3.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + assert_eq!(w.result.replaced_neighbors.len(), 0); }; for (i, peer) in [&peer_1, &peer_2, &peer_3].iter().enumerate() { @@ -1542,20 +1473,14 @@ fn test_step_walk_2_neighbors_rekey() { let _ = peer_1.step(); let _ = peer_2.step(); - match peer_1.network.walk { - Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); - } - None => {} + if let Some(ref w) = peer_1.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + assert_eq!(w.result.replaced_neighbors.len(), 0); }; - 
match peer_2.network.walk { - Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); - } - None => {} + if let Some(ref w) = peer_2.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + assert_eq!(w.result.replaced_neighbors.len(), 0); }; } @@ -1649,20 +1574,14 @@ fn test_step_walk_2_neighbors_different_networks() { walk_2_count ); - match peer_1.network.walk { - Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); - } - None => {} + if let Some(ref w) = peer_1.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + assert_eq!(w.result.replaced_neighbors.len(), 0); }; - match peer_2.network.walk { - Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); - } - None => {} + if let Some(ref w) = peer_2.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + assert_eq!(w.result.replaced_neighbors.len(), 0); }; i += 1; diff --git a/stackslib/src/net/tests/relay/epoch2x.rs b/stackslib/src/net/tests/relay/epoch2x.rs index 9d3de7aacd..2729d648eb 100644 --- a/stackslib/src/net/tests/relay/epoch2x.rs +++ b/stackslib/src/net/tests/relay/epoch2x.rs @@ -1695,23 +1695,17 @@ fn test_get_blocks_and_microblocks_2_peers_push_transactions() { let mut peer_0_to_1 = false; let mut peer_1_to_0 = false; for (nk, event_id) in peers[0].network.events.iter() { - match peers[0].network.peers.get(event_id) { - Some(convo) => { - if *nk == peer_1_nk { - peer_0_to_1 = true; - } + if let Some(convo) = peers[0].network.peers.get(event_id) { + if *nk == peer_1_nk { + peer_0_to_1 = true; } - None => {} } } for (nk, event_id) in peers[1].network.events.iter() { - match peers[1].network.peers.get(event_id) { - Some(convo) => { - if *nk == peer_0_nk { - peer_1_to_0 = true; - } + if let Some(convo) = peers[1].network.peers.get(event_id) { + if *nk == peer_0_nk { + 
peer_1_to_0 = true; } - None => {} } } @@ -3722,17 +3716,14 @@ fn test_block_versioned_smart_contract_mempool_rejection_until_v210() { // tenure 28 let versioned_contract = (*versioned_contract_opt.borrow()).clone().unwrap(); let versioned_contract_len = versioned_contract.serialize_to_vec().len(); - match node.chainstate.will_admit_mempool_tx( + if let Err(e) = node.chainstate.will_admit_mempool_tx( &sortdb.index_handle(&tip.sortition_id), &consensus_hash, &stacks_block.block_hash(), &versioned_contract, versioned_contract_len as u64, ) { - Err(e) => { - panic!("will_admit_mempool_tx {:?}", &e); - } - Ok(_) => {} + panic!("will_admit_mempool_tx {:?}", &e); }; peer.sortdb = Some(sortdb); diff --git a/stackslib/src/net/tests/relay/nakamoto.rs b/stackslib/src/net/tests/relay/nakamoto.rs index d796a7799a..569265803b 100644 --- a/stackslib/src/net/tests/relay/nakamoto.rs +++ b/stackslib/src/net/tests/relay/nakamoto.rs @@ -212,9 +212,6 @@ impl SeedNode { let test_stackers = peer.config.test_stackers.take().unwrap(); let mut all_blocks: Vec = vec![]; - let mut all_burn_ops = vec![]; - let mut rc_blocks = vec![]; - let mut rc_burn_ops = vec![]; // have the peer mine some blocks for two reward cycles for i in 0..(2 * rc_len) { @@ -330,15 +327,10 @@ impl SeedNode { .burnchain .is_reward_cycle_start(tip.block_height) { - rc_blocks.push(all_blocks.clone()); - rc_burn_ops.push(all_burn_ops.clone()); - - all_burn_ops.clear(); all_blocks.clear(); } all_blocks.append(&mut blocks); - all_burn_ops.push(burn_ops); } peer.config.test_signers = Some(test_signers); diff --git a/stackslib/src/net/unsolicited.rs b/stackslib/src/net/unsolicited.rs index 1dbd3d7c37..6df40eaebe 100644 --- a/stackslib/src/net/unsolicited.rs +++ b/stackslib/src/net/unsolicited.rs @@ -481,21 +481,18 @@ impl PeerNetwork { if need_block { // have the downloader request this block if it's new and we don't have it - match self.block_downloader { - Some(ref mut downloader) => { - 
downloader.hint_block_sortition_height_available( - block_sortition_height, - ibd, - need_block, - ); + if let Some(ref mut downloader) = self.block_downloader { + downloader.hint_block_sortition_height_available( + block_sortition_height, + ibd, + need_block, + ); - // advance straight to download state if we're in inv state - if self.work_state == PeerNetworkWorkState::BlockInvSync { - debug!("{:?}: advance directly to block download with knowledge of block sortition {}", &self.get_local_peer(), block_sortition_height); - } - self.have_data_to_download = true; + // advance straight to download state if we're in inv state + if self.work_state == PeerNetworkWorkState::BlockInvSync { + debug!("{:?}: advance directly to block download with knowledge of block sortition {}", &self.get_local_peer(), block_sortition_height); } - None => {} + self.have_data_to_download = true; } } } diff --git a/stackslib/src/util_lib/mod.rs b/stackslib/src/util_lib/mod.rs index 87031676db..af9a4d98a7 100644 --- a/stackslib/src/util_lib/mod.rs +++ b/stackslib/src/util_lib/mod.rs @@ -32,13 +32,10 @@ pub mod test { let mut done = false; while get_epoch_time_secs() <= deadline { sleep_ms(1000); - match rx.try_recv() { - Ok(success) => { - assert!(success); - done = true; - break; - } - Err(_) => {} + if let Ok(success) = rx.try_recv() { + assert!(success); + done = true; + break; } } diff --git a/stackslib/src/util_lib/strings.rs b/stackslib/src/util_lib/strings.rs index 97d194d1a4..a593cae313 100644 --- a/stackslib/src/util_lib/strings.rs +++ b/stackslib/src/util_lib/strings.rs @@ -353,7 +353,7 @@ mod test { let mut contract_bytes = vec![s.len() as u8]; contract_bytes.extend_from_slice(contract_str.as_bytes()); - check_codec_and_corruption::(&contract_str, &clarity_bytes); + check_codec_and_corruption::(&contract_str, &contract_bytes); } #[test] diff --git a/testnet/stacks-node/src/event_dispatcher.rs b/testnet/stacks-node/src/event_dispatcher.rs index 664915061f..2203a8c552 100644 --- 
a/testnet/stacks-node/src/event_dispatcher.rs +++ b/testnet/stacks-node/src/event_dispatcher.rs @@ -2275,6 +2275,8 @@ mod test { let server = Server::http(format!("127.0.0.1:{port}")).unwrap(); thread::spawn(move || { let mut attempt = 0; + // This exists to only keep request from being dropped + #[allow(clippy::collection_is_never_read)] let mut _request_holder = None; while let Ok(request) = server.recv() { attempt += 1; @@ -2341,6 +2343,8 @@ mod test { let server = Server::http(format!("127.0.0.1:{port}")).unwrap(); thread::spawn(move || { let mut attempt = 0; + // This exists to only keep request from being dropped + #[allow(clippy::collection_is_never_read)] let mut _request_holder = None; while let Ok(mut request) = server.recv() { attempt += 1; diff --git a/testnet/stacks-node/src/globals.rs b/testnet/stacks-node/src/globals.rs index 2a9a601723..ca96a1f81c 100644 --- a/testnet/stacks-node/src/globals.rs +++ b/testnet/stacks-node/src/globals.rs @@ -53,7 +53,7 @@ pub struct Globals { unconfirmed_txs: Arc>, /// Writer endpoint to the relayer thread pub relay_send: SyncSender, - /// Cointer state in the main thread + /// Counter state in the main thread pub counters: Counters, /// Connection to the PoX sync watchdog pub sync_comms: PoxSyncWatchdogComms, diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index fb233737bb..16b33ead7a 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -21,6 +21,7 @@ use std::thread; use std::time::{Duration, Instant}; use clarity::boot_util::boot_code_id; +use clarity::vm::costs::ExecutionCost; use clarity::vm::types::PrincipalData; use libsigner::v0::messages::{MinerSlotID, SignerMessage}; use libsigner::StackerDBSession; @@ -177,6 +178,10 @@ pub struct BlockMinerThread { last_block_mined: Option, /// Number of blocks mined since a tenure change/extend was attempted mined_blocks: u64, + /// Cost consumed by 
the current tenure + tenure_cost: ExecutionCost, + /// Cost budget for the current tenure + tenure_budget: ExecutionCost, /// Copy of the node's registered VRF key registered_key: RegisteredKey, /// Burnchain block snapshot which elected this miner @@ -237,6 +242,8 @@ impl BlockMinerThread { burn_tip_at_start: burn_tip_at_start.clone(), tenure_change_time: Instant::now(), abort_flag: Arc::new(AtomicBool::new(false)), + tenure_cost: ExecutionCost::ZERO, + tenure_budget: ExecutionCost::ZERO, } } @@ -1183,7 +1190,7 @@ impl BlockMinerThread { } // build the block itself - let (mut block, consumed, size, tx_events) = NakamotoBlockBuilder::build_nakamoto_block( + let mut block_metadata = NakamotoBlockBuilder::build_nakamoto_block( &chain_state, &burn_db .index_handle_at_ch(&self.burn_block.consensus_hash) @@ -1210,39 +1217,48 @@ impl BlockMinerThread { e })?; - if block.txs.is_empty() { + if block_metadata.block.txs.is_empty() { return Err(ChainstateError::NoTransactionsToMine.into()); } let mining_key = self.keychain.get_nakamoto_sk(); let miner_signature = mining_key - .sign(block.header.miner_signature_hash().as_bytes()) + .sign( + block_metadata + .block + .header + .miner_signature_hash() + .as_bytes(), + ) .map_err(NakamotoNodeError::MinerSignatureError)?; - block.header.miner_signature = miner_signature; + block_metadata.block.header.miner_signature = miner_signature; info!( "Miner: Assembled block #{} for signer set proposal: {}, with {} txs", - block.header.chain_length, - block.header.block_hash(), - block.txs.len(); - "signer_sighash" => %block.header.signer_signature_hash(), - "consensus_hash" => %block.header.consensus_hash, - "parent_block_id" => %block.header.parent_block_id, - "timestamp" => block.header.timestamp, + block_metadata.block.header.chain_length, + block_metadata.block.header.block_hash(), + block_metadata.block.txs.len(); + "signer_sighash" => %block_metadata.block.header.signer_signature_hash(), + "consensus_hash" => 
%block_metadata.block.header.consensus_hash, + "parent_block_id" => %block_metadata.block.header.parent_block_id, + "timestamp" => block_metadata.block.header.timestamp, ); self.event_dispatcher.process_mined_nakamoto_block_event( self.burn_block.block_height, - &block, - size, - &consumed, - tx_events, + &block_metadata.block, + block_metadata.tenure_size, + &block_metadata.tenure_consumed, + block_metadata.tx_events, ); + self.tenure_cost = block_metadata.tenure_consumed; + self.tenure_budget = block_metadata.tenure_budget; + // last chance -- confirm that the stacks tip is unchanged (since it could have taken long // enough to build this block that another block could have arrived), and confirm that all // Stacks blocks with heights higher than the canonical tip are processed. self.check_burn_tip_changed(&burn_db)?; - Ok(block) + Ok(block_metadata.block) } #[cfg_attr(test, mutants::skip)] @@ -1273,8 +1289,20 @@ impl BlockMinerThread { } } }; + // Check if we can and should include a time-based tenure extend. if self.last_block_mined.is_some() { - // Check if we can extend the current tenure + // Do not extend if we have spent < 50% of the budget, since it is + // not necessary. 
+ let usage = self + .tenure_budget + .proportion_largest_dimension(&self.tenure_cost); + if usage < self.config.miner.tenure_extend_cost_threshold { + return Ok(NakamotoTenureInfo { + coinbase_tx: None, + tenure_change_tx: None, + }); + } + let tenure_extend_timestamp = coordinator.get_tenure_extend_timestamp(); if get_epoch_time_secs() <= tenure_extend_timestamp && self.tenure_change_time.elapsed() <= self.config.miner.tenure_timeout @@ -1284,6 +1312,7 @@ impl BlockMinerThread { tenure_change_tx: None, }); } + info!("Miner: Time-based tenure extend"; "current_timestamp" => get_epoch_time_secs(), "tenure_extend_timestamp" => tenure_extend_timestamp, diff --git a/testnet/stacks-node/src/nakamoto_node/relayer.rs b/testnet/stacks-node/src/nakamoto_node/relayer.rs index e8b848e748..2cbc37acff 100644 --- a/testnet/stacks-node/src/nakamoto_node/relayer.rs +++ b/testnet/stacks-node/src/nakamoto_node/relayer.rs @@ -694,6 +694,9 @@ impl RelayerThread { /// this sortition matches the sortition tip and we have a parent to build atop. /// /// Otherwise, returns None, meaning no action will be taken. + // This method is covered by the e2e bitcoind tests, which do not show up + // in mutant coverage. 
+ #[cfg_attr(test, mutants::skip)] fn process_sortition( &mut self, consensus_hash: ConsensusHash, @@ -705,8 +708,16 @@ impl RelayerThread { .expect("FATAL: unknown consensus hash"); // always clear this even if this isn't the latest sortition - let cleared = self.last_commits.remove(&sn.winning_block_txid); - let won_sortition = sn.sortition && cleared; + let _cleared = self.last_commits.remove(&sn.winning_block_txid); + let was_winning_pkh = if let (Some(ref winning_pkh), Some(ref my_pkh)) = + (sn.miner_pk_hash, self.get_mining_key_pkh()) + { + winning_pkh == my_pkh + } else { + false + }; + + let won_sortition = sn.sortition && was_winning_pkh; if won_sortition { increment_stx_blocks_mined_counter(); } @@ -1628,7 +1639,7 @@ impl RelayerThread { self.last_commits.insert(txid); self.globals .counters - .bump_naka_submitted_commits(last_committed.burn_tip.block_height); + .bump_naka_submitted_commits(last_committed.burn_tip.block_height, tip_height); self.last_committed = Some(last_committed); Ok(()) diff --git a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs index 58d5f2da89..2138b7e767 100644 --- a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs @@ -13,9 +13,12 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see . +use std::collections::BTreeMap; +use std::ops::Bound::Included; use std::sync::atomic::AtomicBool; use std::sync::{Arc, Mutex}; use std::thread::JoinHandle; +use std::time::{Duration, Instant}; use libsigner::v0::messages::{MinerSlotID, SignerMessage as SignerMessageV0}; use libsigner::{BlockProposal, SignerSession, StackerDBSession}; @@ -66,6 +69,8 @@ pub struct SignerCoordinator { /// Rather, this burn block is used to determine whether or not a new /// burn block has arrived since this thread started. 
burn_tip_at_start: ConsensusHash, + /// The timeout configuration based on the percentage of rejections + block_rejection_timeout_steps: BTreeMap, } impl SignerCoordinator { @@ -101,6 +106,14 @@ impl SignerCoordinator { let miners_contract_id = boot_code_id(MINERS_NAME, is_mainnet); let miners_session = StackerDBSession::new(&rpc_socket.to_string(), miners_contract_id); + // build a BTreeMap of the various timeout steps + let mut block_rejection_timeout_steps = BTreeMap::::new(); + for (percentage, duration) in config.miner.block_rejection_timeout_steps.iter() { + let rejections_amount = + ((f64::from(listener.total_weight) / 100.0) * f64::from(*percentage)) as u32; + block_rejection_timeout_steps.insert(rejections_amount, *duration); + } + let mut sc = Self { message_key, is_mainnet, @@ -111,6 +124,7 @@ impl SignerCoordinator { keep_running, listener_thread: None, burn_tip_at_start: burn_tip_at_start.clone(), + block_rejection_timeout_steps, }; // Spawn the signer DB listener thread @@ -293,16 +307,38 @@ impl SignerCoordinator { sortdb: &SortitionDB, counters: &Counters, ) -> Result, NakamotoNodeError> { + // the amount of current rejections (used to eventually modify the timeout) + let mut rejections: u32 = 0; + // default timeout (the 0 entry must be always present) + let mut rejections_timeout = self + .block_rejection_timeout_steps + .get(&rejections) + .ok_or_else(|| { + NakamotoNodeError::SigningCoordinatorFailure( + "Invalid rejection timeout step function definition".into(), + ) + })?; + + // this is used to track the start of the waiting cycle + let rejections_timer = Instant::now(); loop { + // At every iteration wait for the block_status. + // Exit when the amount of confirmations/rejections reaches the threshold (or until timeout) + // Based on the amount of rejections, eventually modify the timeout. 
let block_status = match self.stackerdb_comms.wait_for_block_status( block_signer_sighash, EVENT_RECEIVER_POLL, |status| { - status.total_weight_signed < self.weight_threshold - && status - .total_reject_weight - .saturating_add(self.weight_threshold) - <= self.total_weight + // rejections-based timeout expired? + if rejections_timer.elapsed() > *rejections_timeout { + return false; + } + // number of rejections changed? + if status.total_reject_weight != rejections { + return false; + } + // enough signatures? + return status.total_weight_signed < self.weight_threshold; }, )? { Some(status) => status, @@ -336,10 +372,44 @@ impl SignerCoordinator { return Err(NakamotoNodeError::BurnchainTipChanged); } + if rejections_timer.elapsed() > *rejections_timeout { + warn!("Timed out while waiting for responses from signers"; + "elapsed" => rejections_timer.elapsed().as_secs(), + "rejections_timeout" => rejections_timeout.as_secs(), + "rejections" => rejections, + "rejections_threshold" => self.total_weight.saturating_sub(self.weight_threshold) + ); + return Err(NakamotoNodeError::SigningCoordinatorFailure( + "Timed out while waiting for signatures".into(), + )); + } + continue; } }; + if rejections != block_status.total_reject_weight { + rejections = block_status.total_reject_weight; + let (rejections_step, new_rejections_timeout) = self + .block_rejection_timeout_steps + .range((Included(0), Included(rejections))) + .last() + .ok_or_else(|| { + NakamotoNodeError::SigningCoordinatorFailure( + "Invalid rejection timeout step function definition".into(), + ) + })?; + rejections_timeout = new_rejections_timeout; + info!("Number of received rejections updated, resetting timeout"; + "rejections" => rejections, + "rejections_timeout" => rejections_timeout.as_secs(), + "rejections_step" => rejections_step, + "rejections_threshold" => self.total_weight.saturating_sub(self.weight_threshold)); + + counters.set_miner_current_rejections_timeout_secs(rejections_timeout.as_secs()); + 
counters.set_miner_current_rejections(rejections); + } + if block_status .total_reject_weight .saturating_add(self.weight_threshold) @@ -357,10 +427,18 @@ impl SignerCoordinator { "block_signer_sighash" => %block_signer_sighash, ); return Ok(block_status.gathered_signatures.values().cloned().collect()); - } else { + } else if rejections_timer.elapsed() > *rejections_timeout { + warn!("Timed out while waiting for responses from signers"; + "elapsed" => rejections_timer.elapsed().as_secs(), + "rejections_timeout" => rejections_timeout.as_secs(), + "rejections" => rejections, + "rejections_threshold" => self.total_weight.saturating_sub(self.weight_threshold) + ); return Err(NakamotoNodeError::SigningCoordinatorFailure( - "Unblocked without reaching the threshold".into(), + "Timed out while waiting for signatures".into(), )); + } else { + continue; } } } diff --git a/testnet/stacks-node/src/run_loop/neon.rs b/testnet/stacks-node/src/run_loop/neon.rs index 277b9612fa..299335f35f 100644 --- a/testnet/stacks-node/src/run_loop/neon.rs +++ b/testnet/stacks-node/src/run_loop/neon.rs @@ -116,6 +116,10 @@ pub struct Counters { pub naka_mined_tenures: RunLoopCounter, pub naka_signer_pushed_blocks: RunLoopCounter, pub naka_miner_directives: RunLoopCounter, + pub naka_submitted_commit_last_stacks_tip: RunLoopCounter, + + pub naka_miner_current_rejections: RunLoopCounter, + pub naka_miner_current_rejections_timeout_secs: RunLoopCounter, #[cfg(test)] pub naka_skip_commit_op: TestFlag, @@ -170,11 +174,19 @@ impl Counters { Counters::inc(&self.naka_submitted_vrfs); } - pub fn bump_naka_submitted_commits(&self, committed_height: u64) { + pub fn bump_naka_submitted_commits( + &self, + committed_burn_height: u64, + committed_stacks_height: u64, + ) { Counters::inc(&self.naka_submitted_commits); Counters::set( &self.naka_submitted_commit_last_burn_height, - committed_height, + committed_burn_height, + ); + Counters::set( + &self.naka_submitted_commit_last_stacks_tip, + 
committed_stacks_height, ); } @@ -205,6 +217,14 @@ impl Counters { pub fn set_microblocks_processed(&self, value: u64) { Counters::set(&self.microblocks_processed, value) } + + pub fn set_miner_current_rejections_timeout_secs(&self, value: u64) { + Counters::set(&self.naka_miner_current_rejections_timeout_secs, value) + } + + pub fn set_miner_current_rejections(&self, value: u32) { + Counters::set(&self.naka_miner_current_rejections, u64::from(value)) + } } /// Coordinating a node running in neon mode. diff --git a/testnet/stacks-node/src/tests/epoch_21.rs b/testnet/stacks-node/src/tests/epoch_21.rs index 68e5f60fd1..b287d2dec4 100644 --- a/testnet/stacks-node/src/tests/epoch_21.rs +++ b/testnet/stacks-node/src/tests/epoch_21.rs @@ -1045,7 +1045,6 @@ fn transition_adds_get_pox_addr_recipients() { ); let mut spender_sks = vec![]; - let mut spender_addrs = vec![]; let mut initial_balances = vec![]; let mut expected_pox_addrs = HashSet::new(); @@ -1056,7 +1055,6 @@ fn transition_adds_get_pox_addr_recipients() { let spender_addr: PrincipalData = to_addr(&spender_sk).into(); spender_sks.push(spender_sk); - spender_addrs.push(spender_addr.clone()); initial_balances.push(InitialBalance { address: spender_addr.clone(), amount: stacked + 100_000, @@ -1353,8 +1351,6 @@ fn transition_adds_mining_from_segwit() { u32::MAX, ); - let mut spender_sks = vec![]; - let mut spender_addrs = vec![]; let mut initial_balances = vec![]; let stacked = 100_000_000_000 * (core::MICROSTACKS_PER_STACKS as u64); @@ -1363,8 +1359,6 @@ fn transition_adds_mining_from_segwit() { let spender_sk = StacksPrivateKey::random(); let spender_addr: PrincipalData = to_addr(&spender_sk).into(); - spender_sks.push(spender_sk); - spender_addrs.push(spender_addr.clone()); initial_balances.push(InitialBalance { address: spender_addr.clone(), amount: stacked + 100_000, diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 
30c3cfed3b..4099ce64f2 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -247,9 +247,15 @@ pub fn check_nakamoto_empty_block_heuristics() { continue; } let txs = test_observer::parse_transactions(block); - let has_tenure_change = txs - .iter() - .any(|tx| matches!(tx.payload, TransactionPayload::TenureChange(_))); + let has_tenure_change = txs.iter().any(|tx| { + matches!( + tx.payload, + TransactionPayload::TenureChange(TenureChangePayload { + cause: TenureChangeCause::BlockFound, + .. + }) + ) + }); if has_tenure_change { let only_coinbase_and_tenure_change = txs.iter().all(|tx| { matches!( @@ -687,7 +693,7 @@ where error!("Timed out waiting for check to process"); return Err("Timed out".into()); } - thread::sleep(Duration::from_millis(100)); + thread::sleep(Duration::from_millis(500)); } Ok(()) } @@ -722,14 +728,14 @@ pub fn next_block_and_process_new_stacks_block( pub fn next_block_and_mine_commit( btc_controller: &mut BitcoinRegtestController, timeout_secs: u64, - coord_channels: &Arc>, - commits_submitted: &Arc, + node_conf: &Config, + node_counters: &Counters, ) -> Result<(), String> { next_block_and_wait_for_commits( btc_controller, timeout_secs, - &[coord_channels], - &[commits_submitted], + &[node_conf], + &[node_counters], true, ) } @@ -739,14 +745,14 @@ pub fn next_block_and_mine_commit( pub fn next_block_and_commits_only( btc_controller: &mut BitcoinRegtestController, timeout_secs: u64, - coord_channels: &Arc>, - commits_submitted: &Arc, + node_conf: &Config, + node_counters: &Counters, ) -> Result<(), String> { next_block_and_wait_for_commits( btc_controller, timeout_secs, - &[coord_channels], - &[commits_submitted], + &[node_conf], + &[node_counters], false, ) } @@ -759,98 +765,47 @@ pub fn next_block_and_commits_only( pub fn next_block_and_wait_for_commits( btc_controller: &mut BitcoinRegtestController, timeout_secs: u64, - coord_channels: &[&Arc>], - 
commits_submitted: &[&Arc], + node_confs: &[&Config], + node_counters: &[&Counters], wait_for_stacks_block: bool, ) -> Result<(), String> { - let commits_submitted: Vec<_> = commits_submitted.to_vec(); - let blocks_processed_before: Vec<_> = coord_channels + let infos_before: Vec<_> = node_confs.iter().map(|c| get_chain_info(c)).collect(); + let burn_ht_before = infos_before .iter() - .map(|x| { - x.lock() - .expect("Mutex poisoned") - .get_stacks_blocks_processed() - }) - .collect(); - let commits_before: Vec<_> = commits_submitted + .map(|info| info.burn_block_height) + .max() + .unwrap(); + let stacks_ht_before = infos_before .iter() - .map(|x| x.load(Ordering::SeqCst)) - .collect(); + .map(|info| info.stacks_tip_height) + .max() + .unwrap(); + let last_commit_burn_hts = node_counters + .iter() + .map(|c| &c.naka_submitted_commit_last_burn_height); + let last_commit_stacks_hts = node_counters + .iter() + .map(|c| &c.naka_submitted_commit_last_stacks_tip); - let mut block_processed_time: Vec> = vec![None; commits_before.len()]; - let mut commit_sent_time: Vec> = vec![None; commits_before.len()]; next_block_and(btc_controller, timeout_secs, || { - for i in 0..commits_submitted.len() { - let commits_sent = commits_submitted[i].load(Ordering::SeqCst); - let blocks_processed = coord_channels[i] - .lock() - .expect("Mutex poisoned") - .get_stacks_blocks_processed(); - let now = Instant::now(); - if blocks_processed > blocks_processed_before[i] && block_processed_time[i].is_none() { - block_processed_time[i].replace(now); - } - if commits_sent > commits_before[i] && commit_sent_time[i].is_none() { - commit_sent_time[i].replace(now); - } - } - + let burn_height_committed_to = + last_commit_burn_hts.clone().all(|last_commit_burn_height| { + last_commit_burn_height.load(Ordering::SeqCst) > burn_ht_before + }); if !wait_for_stacks_block { - for i in 0..commits_submitted.len() { - // just wait for the commit - let commits_sent = commits_submitted[i].load(Ordering::SeqCst); 
- if commits_sent <= commits_before[i] { - return Ok(false); - } - - // if two commits have been sent, one of them must have been after - if commits_sent >= commits_before[i] + 1 { - continue; - } - return Ok(false); - } - return Ok(true); - } - - // waiting for both commit and stacks block - for i in 0..commits_submitted.len() { - let blocks_processed = coord_channels[i] - .lock() - .expect("Mutex poisoned") - .get_stacks_blocks_processed(); - let commits_sent = commits_submitted[i].load(Ordering::SeqCst); - - if blocks_processed > blocks_processed_before[i] { - // either we don't care about the stacks block count, or the block count advanced. - // Check the block-commits. - let block_processed_time = block_processed_time[i] - .as_ref() - .ok_or("TEST-ERROR: Processed block time wasn't set")?; - if commits_sent <= commits_before[i] { - return Ok(false); - } - let commit_sent_time = commit_sent_time[i] - .as_ref() - .ok_or("TEST-ERROR: Processed commit time wasn't set")?; - // try to ensure the commit was sent after the block was processed - if commit_sent_time > block_processed_time { - continue; - } - // if two commits have been sent, one of them must have been after - if commits_sent >= commits_before[i] + 2 { - continue; - } - // otherwise, just timeout if the commit was sent and its been long enough - // for a new commit pass to have occurred - if block_processed_time.elapsed() > Duration::from_secs(10) { - continue; - } - return Ok(false); - } else { + Ok(burn_height_committed_to) + } else { + if !burn_height_committed_to { return Ok(false); } + let stacks_tip_committed_to = + last_commit_stacks_hts + .clone() + .all(|last_commit_stacks_height| { + last_commit_stacks_height.load(Ordering::SeqCst) > stacks_ht_before + }); + return Ok(stacks_tip_committed_to); } - Ok(true) }) } @@ -1535,6 +1490,7 @@ fn simple_neon_integration() { naka_proposed_blocks: proposals_submitted, .. 
} = run_loop.counters(); + let node_counters = run_loop.counters(); let coord_channel = run_loop.coordinator_channels(); @@ -1592,13 +1548,8 @@ fn simple_neon_integration() { // Mine 15 nakamoto tenures for _i in 0..15 { - next_block_and_mine_commit( - &mut btc_regtest_controller, - 60, - &coord_channel, - &commits_submitted, - ) - .unwrap(); + next_block_and_mine_commit(&mut btc_regtest_controller, 60, &naka_conf, &node_counters) + .unwrap(); } // Submit a TX @@ -1646,13 +1597,8 @@ fn simple_neon_integration() { // Mine 15 more nakamoto tenures for _i in 0..15 { - next_block_and_mine_commit( - &mut btc_regtest_controller, - 60, - &coord_channel, - &commits_submitted, - ) - .unwrap(); + next_block_and_mine_commit(&mut btc_regtest_controller, 60, &naka_conf, &node_counters) + .unwrap(); } // load the chain tip, and assert that it is a nakamoto block and at least 30 blocks have advanced in epoch 3 @@ -1742,6 +1688,220 @@ fn simple_neon_integration() { #[test] #[ignore] +/// Test a scenario in which a miner is restarted right before a tenure +/// which they won. The miner, on restart, should begin mining the new tenure. 
+fn restarting_miner() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let (mut naka_conf, _miner_account) = naka_neon_integration_conf(None); + let prom_bind = "127.0.0.1:6000".to_string(); + naka_conf.node.prometheus_bind = Some(prom_bind.clone()); + naka_conf.miner.activated_vrf_key_path = + Some(format!("{}/vrf_key", naka_conf.node.working_dir)); + naka_conf.miner.wait_on_interim_blocks = Duration::from_secs(5); + let sender_sk = Secp256k1PrivateKey::from_seed(&[1, 2, 1, 2, 1, 2]); + // setup sender + recipient for a test stx transfer + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 1000; + let send_fee = 100; + naka_conf.add_initial_balance( + PrincipalData::from(sender_addr).to_string(), + send_amt * 2 + send_fee, + ); + let sender_signer_sk = Secp256k1PrivateKey::from_seed(&[3, 2, 3, 2, 3, 2]); + let sender_signer_addr = tests::to_addr(&sender_signer_sk); + let mut signers = TestSigners::new(vec![sender_signer_sk]); + naka_conf.add_initial_balance(PrincipalData::from(sender_signer_addr).to_string(), 100000); + let stacker_sk = setup_stacker(&mut naka_conf); + + test_observer::spawn(); + test_observer::register_any(&mut naka_conf); + + let mut btcd_controller = BitcoinCoreController::new(naka_conf.clone()); + btcd_controller + .start_bitcoind() + .expect("Failed starting bitcoind"); + let mut btc_regtest_controller = BitcoinRegtestController::new(naka_conf.clone(), None); + btc_regtest_controller.bootstrap_chain(201); + + let mut run_loop = boot_nakamoto::BootRunLoop::new(naka_conf.clone()).unwrap(); + let run_loop_stopper = run_loop.get_termination_switch(); + let Counters { + blocks_processed, + naka_submitted_commits: commits_submitted, + naka_proposed_blocks: proposals_submitted, + .. 
+ } = run_loop.counters(); + let rl1_counters = run_loop.counters(); + let coord_channel = run_loop.coordinator_channels(); + + let mut run_loop_2 = boot_nakamoto::BootRunLoop::new(naka_conf.clone()).unwrap(); + let _run_loop_2_stopper = run_loop.get_termination_switch(); + let Counters { + blocks_processed: blocks_processed_2, + naka_proposed_blocks: proposals_submitted_2, + .. + } = run_loop_2.counters(); + let rl2_counters = run_loop_2.counters(); + + let run_loop_thread = thread::spawn(move || run_loop.start(None, 0)); + wait_for_runloop(&blocks_processed); + boot_to_epoch_3( + &naka_conf, + &blocks_processed, + &[stacker_sk], + &[sender_signer_sk], + &mut Some(&mut signers), + &mut btc_regtest_controller, + ); + + info!("Bootstrapped to Epoch-3.0 boundary, starting nakamoto miner"); + + let burnchain = naka_conf.get_burnchain(); + let sortdb = burnchain.open_sortition_db(true).unwrap(); + let (chainstate, _) = StacksChainState::open( + naka_conf.is_mainnet(), + naka_conf.burnchain.chain_id, + &naka_conf.get_chainstate_path_str(), + None, + ) + .unwrap(); + + let block_height_pre_3_0 = + NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) + .unwrap() + .unwrap() + .stacks_block_height; + + info!("Nakamoto miner started..."); + blind_signer_multinode( + &signers, + &[&naka_conf, &naka_conf], + vec![proposals_submitted, proposals_submitted_2], + ); + + wait_for_first_naka_block_commit(60, &commits_submitted); + + // Mine 2 nakamoto tenures + for _i in 0..2 { + next_block_and_mine_commit(&mut btc_regtest_controller, 60, &naka_conf, &rl1_counters) + .unwrap(); + } + + let last_tip = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) + .unwrap() + .unwrap(); + info!( + "Latest tip"; + "height" => last_tip.stacks_block_height, + "is_nakamoto" => last_tip.anchored_header.as_stacks_nakamoto().is_some(), + ); + + // close the current miner + coord_channel + .lock() + .expect("Mutex poisoned") + .stop_chains_coordinator(); + 
run_loop_stopper.store(false, Ordering::SeqCst); + run_loop_thread.join().unwrap(); + + // mine a bitcoin block -- this should include a winning commit from + // the miner + btc_regtest_controller.build_next_block(1); + + // start it back up + + let _run_loop_thread = thread::spawn(move || run_loop_2.start(None, 0)); + wait_for_runloop(&blocks_processed_2); + + info!(" ================= RESTARTED THE MINER ================="); + + let tip = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) + .unwrap() + .unwrap(); + info!( + "Latest tip"; + "height" => tip.stacks_block_height, + "is_nakamoto" => tip.anchored_header.as_stacks_nakamoto().is_some(), + ); + + wait_for(60, || { + let tip = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) + .unwrap() + .unwrap(); + let stacks_tip_committed_to = rl2_counters + .naka_submitted_commit_last_stacks_tip + .load(Ordering::SeqCst); + Ok(tip.stacks_block_height > last_tip.stacks_block_height + && stacks_tip_committed_to > last_tip.stacks_block_height) + }) + .unwrap_or_else(|e| { + let tip = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) + .unwrap() + .unwrap(); + error!( + "Failed to get a new block after restart"; + "last_tip_height" => last_tip.stacks_block_height, + "latest_tip" => tip.stacks_block_height, + "error" => &e, + ); + + panic!("{e}") + }); + + // Mine 2 more nakamoto tenures + for _i in 0..2 { + next_block_and_mine_commit(&mut btc_regtest_controller, 60, &naka_conf, &rl2_counters) + .unwrap(); + } + + // load the chain tip, and assert that it is a nakamoto block and at least 30 blocks have advanced in epoch 3 + let tip = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb) + .unwrap() + .unwrap(); + info!( + "=== Last tip ==="; + "height" => tip.stacks_block_height, + "is_nakamoto" => tip.anchored_header.as_stacks_nakamoto().is_some(), + ); + + assert!(tip.anchored_header.as_stacks_nakamoto().is_some()); + + // Check that 
we aren't missing burn blocks + let bhh = u64::from(tip.burn_header_height); + // make sure every burn block after the nakamoto transition has a mined + // nakamoto block in it. + let missing = test_observer::get_missing_burn_blocks(220..=bhh).unwrap(); + + // This test was flakey because it was sometimes missing burn block 230, which is right at the Nakamoto transition + // So it was possible to miss a burn block during the transition + // But I don't think it matters at this point since the Nakamoto transition has already happened on mainnet + // So just print a warning instead, don't count it as an error + let missing_is_error: Vec<_> = missing + .into_iter() + .filter(|i| match i { + 230 => { + warn!("Missing burn block {i}"); + false + } + _ => true, + }) + .collect(); + + if !missing_is_error.is_empty() { + panic!("Missing the following burn blocks: {missing_is_error:?}"); + } + + check_nakamoto_empty_block_heuristics(); + + assert!(tip.stacks_block_height >= block_height_pre_3_0 + 4); +} + +#[test] +#[ignore] +#[allow(non_snake_case)] /// This test spins up a nakamoto-neon node. /// It starts in Epoch 2.0, mines with `neon_node` to Epoch 3.0, /// having flash blocks when epoch updates and expects everything to work normally, @@ -1751,7 +1911,12 @@ fn simple_neon_integration() { /// * 30 blocks are mined after 3.0 starts. This is enough to mine across 2 reward cycles /// * A transaction submitted to the mempool in 3.0 will be mined in 3.0 /// * The final chain tip is a nakamoto block -fn flash_blocks_on_epoch_3() { +/// +/// NOTE: This test has been disabled because it's flaky, and we don't need to +/// test the Epoch 3 transition since it's already happened +/// +/// See issue [#5765](https://github.com/stacks-network/stacks-core/issues/5765) for details +fn flash_blocks_on_epoch_3_FLAKY() { if env::var("BITCOIND_TEST") != Ok("1".into()) { return; } @@ -1794,6 +1959,7 @@ fn flash_blocks_on_epoch_3() { naka_proposed_blocks: proposals_submitted, .. 
} = run_loop.counters(); + let counters = run_loop.counters(); let coord_channel = run_loop.coordinator_channels(); @@ -1868,13 +2034,7 @@ fn flash_blocks_on_epoch_3() { // Mine 15 nakamoto tenures for _i in 0..15 { - next_block_and_mine_commit( - &mut btc_regtest_controller, - 60, - &coord_channel, - &commits_submitted, - ) - .unwrap(); + next_block_and_mine_commit(&mut btc_regtest_controller, 60, &naka_conf, &counters).unwrap(); } // Submit a TX @@ -1910,13 +2070,7 @@ fn flash_blocks_on_epoch_3() { // Mine 15 more nakamoto tenures for _i in 0..15 { - next_block_and_mine_commit( - &mut btc_regtest_controller, - 60, - &coord_channel, - &commits_submitted, - ) - .unwrap(); + next_block_and_mine_commit(&mut btc_regtest_controller, 60, &naka_conf, &counters).unwrap(); } // load the chain tip, and assert that it is a nakamoto block and at least 30 blocks have advanced in epoch 3 @@ -2493,6 +2647,7 @@ fn correct_burn_outs() { naka_proposed_blocks: proposals_submitted, .. } = run_loop.counters(); + let counters = run_loop.counters(); let coord_channel = run_loop.coordinator_channels(); @@ -2665,12 +2820,9 @@ fn correct_burn_outs() { let prior_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()) .unwrap() .block_height; - if let Err(e) = next_block_and_mine_commit( - &mut btc_regtest_controller, - 30, - &coord_channel, - &commits_submitted, - ) { + if let Err(e) = + next_block_and_mine_commit(&mut btc_regtest_controller, 30, &naka_conf, &counters) + { warn!( "Error while minting a bitcoin block and waiting for stacks-node activity: {e:?}" ); @@ -2810,6 +2962,7 @@ fn block_proposal_api_endpoint() { naka_proposed_blocks: proposals_submitted, .. 
} = run_loop.counters(); + let counters = run_loop.counters(); let coord_channel = run_loop.coordinator_channels(); @@ -2850,13 +3003,7 @@ fn block_proposal_api_endpoint() { // Mine 3 nakamoto tenures for _ in 0..3 { - next_block_and_mine_commit( - &mut btc_regtest_controller, - 60, - &coord_channel, - &commits_submitted, - ) - .unwrap(); + next_block_and_mine_commit(&mut btc_regtest_controller, 60, &conf, &counters).unwrap(); } // TODO (hack) instantiate the sortdb in the burnchain @@ -3083,7 +3230,7 @@ fn block_proposal_api_endpoint() { if ix == 1 { // release the test observer mutex so that the handler from 0 can finish! - hold_proposal_mutex.take(); + _ = hold_proposal_mutex.take(); } } @@ -3188,6 +3335,7 @@ fn miner_writes_proposed_block_to_stackerdb() { naka_proposed_blocks: proposals_submitted, .. } = run_loop.counters(); + let counters = run_loop.counters(); let coord_channel = run_loop.coordinator_channels(); @@ -3208,13 +3356,7 @@ fn miner_writes_proposed_block_to_stackerdb() { wait_for_first_naka_block_commit(60, &commits_submitted); // Mine 1 nakamoto tenure - next_block_and_mine_commit( - &mut btc_regtest_controller, - 60, - &coord_channel, - &commits_submitted, - ) - .unwrap(); + next_block_and_mine_commit(&mut btc_regtest_controller, 60, &naka_conf, &counters).unwrap(); let sortdb = naka_conf.get_burnchain().open_sortition_db(true).unwrap(); @@ -3299,6 +3441,7 @@ fn vote_for_aggregate_key_burn_op() { naka_proposed_blocks: proposals_submitted, .. 
} = run_loop.counters(); + let counters = run_loop.counters(); let coord_channel = run_loop.coordinator_channels(); @@ -3381,13 +3524,7 @@ fn vote_for_aggregate_key_burn_op() { ); for _i in 0..(blocks_until_prepare) { - next_block_and_mine_commit( - &mut btc_regtest_controller, - 60, - &coord_channel, - &commits_submitted, - ) - .unwrap(); + next_block_and_mine_commit(&mut btc_regtest_controller, 60, &naka_conf, &counters).unwrap(); } let reward_cycle = reward_cycle + 1; @@ -3437,13 +3574,7 @@ fn vote_for_aggregate_key_burn_op() { // the second block should process the vote, after which the vote should be set for _i in 0..2 { - next_block_and_mine_commit( - &mut btc_regtest_controller, - 60, - &coord_channel, - &commits_submitted, - ) - .unwrap(); + next_block_and_mine_commit(&mut btc_regtest_controller, 60, &naka_conf, &counters).unwrap(); } let mut vote_for_aggregate_key_found = false; @@ -4405,6 +4536,7 @@ fn burn_ops_integration_test() { naka_proposed_blocks: proposals_submitted, .. 
} = run_loop.counters(); + let counters = run_loop.counters(); let coord_channel = run_loop.coordinator_channels(); @@ -4456,13 +4588,7 @@ fn burn_ops_integration_test() { "Pre-stx operation should submit successfully" ); - next_block_and_mine_commit( - &mut btc_regtest_controller, - 60, - &coord_channel, - &commits_submitted, - ) - .unwrap(); + next_block_and_mine_commit(&mut btc_regtest_controller, 60, &naka_conf, &counters).unwrap(); let mut miner_signer_2 = Keychain::default(naka_conf.node.seed.clone()).generate_op_signer(); info!("Submitting second pre-stx op"); @@ -4590,13 +4716,7 @@ fn burn_ops_integration_test() { ); for _i in 0..(blocks_until_prepare) { - next_block_and_mine_commit( - &mut btc_regtest_controller, - 60, - &coord_channel, - &commits_submitted, - ) - .unwrap(); + next_block_and_mine_commit(&mut btc_regtest_controller, 60, &naka_conf, &counters).unwrap(); } let reward_cycle = reward_cycle + 1; @@ -4753,13 +4873,7 @@ fn burn_ops_integration_test() { // the second block should process the ops // Also mine 2 interim blocks to ensure the stack-stx ops are not processed in them for _i in 0..2 { - next_block_and_mine_commit( - &mut btc_regtest_controller, - 60, - &coord_channel, - &commits_submitted, - ) - .unwrap(); + next_block_and_mine_commit(&mut btc_regtest_controller, 60, &naka_conf, &counters).unwrap(); for interim_block_ix in 0..2 { info!("Mining interim block {interim_block_ix}"); let blocks_processed_before = coord_channel @@ -5806,6 +5920,7 @@ fn nakamoto_attempt_time() { naka_proposed_blocks: proposals_submitted, .. 
} = run_loop.counters(); + let counters = run_loop.counters(); let coord_channel = run_loop.coordinator_channels(); @@ -5845,13 +5960,7 @@ fn nakamoto_attempt_time() { // Mine 3 nakamoto tenures for _ in 0..3 { - next_block_and_mine_commit( - &mut btc_regtest_controller, - 60, - &coord_channel, - &commits_submitted, - ) - .unwrap(); + next_block_and_mine_commit(&mut btc_regtest_controller, 60, &naka_conf, &counters).unwrap(); } // TODO (hack) instantiate the sortdb in the burnchain @@ -6377,6 +6486,7 @@ fn signer_chainstate() { naka_proposed_blocks: proposals_submitted, .. } = run_loop.counters(); + let counters = run_loop.counters(); let coord_channel = run_loop.coordinator_channels(); @@ -6454,13 +6564,7 @@ fn signer_chainstate() { // hold the first and last blocks of the first tenure. we'll use this to submit reorging proposals let mut first_tenure_blocks: Option> = None; for i in 0..15 { - next_block_and_mine_commit( - &mut btc_regtest_controller, - 60, - &coord_channel, - &commits_submitted, - ) - .unwrap(); + next_block_and_mine_commit(&mut btc_regtest_controller, 60, &naka_conf, &counters).unwrap(); // this config disallows any reorg due to poorly timed block commits let proposal_conf = ProposalEvalConfig { @@ -6971,6 +7075,7 @@ fn continue_tenure_extend() { naka_skip_commit_op: test_skip_commit_op, .. 
} = run_loop.counters(); + let counters = run_loop.counters(); let coord_channel = run_loop.coordinator_channels(); @@ -7032,13 +7137,7 @@ fn continue_tenure_extend() { wait_for_first_naka_block_commit(60, &commits_submitted); // Mine a regular nakamoto tenure - next_block_and_mine_commit( - &mut btc_regtest_controller, - 60, - &coord_channel, - &commits_submitted, - ) - .unwrap(); + next_block_and_mine_commit(&mut btc_regtest_controller, 60, &naka_conf, &counters).unwrap(); wait_for(5, || { let blocks_processed = coord_channel @@ -7200,7 +7299,9 @@ fn continue_tenure_extend() { let mut tenure_block_founds = vec![]; let mut transfer_tx_included = false; let mut last_block_had_extend = false; - for block in test_observer::get_blocks() { + for pair in test_observer::get_blocks().windows(2) { + let prev_block = &pair[0]; + let block = &pair[1]; let mut has_extend = false; for tx in block["transactions"].as_array().unwrap() { let raw_tx = tx["raw_tx"].as_str().unwrap(); @@ -7221,8 +7322,10 @@ fn continue_tenure_extend() { tenure_extends.push(parsed); } TenureChangeCause::BlockFound => { - if last_block_had_extend { - panic!("Expected a Nakamoto block to happen after tenure extend block"); + if last_block_had_extend + && prev_block["transactions"].as_array().unwrap().len() <= 1 + { + panic!("Expected other transactions to happen after tenure extend"); } tenure_block_founds.push(parsed); } @@ -7466,6 +7569,7 @@ fn check_block_times() { naka_proposed_blocks: proposals_submitted, .. 
} = run_loop.counters(); + let counters = run_loop.counters(); let coord_channel = run_loop.coordinator_channels(); @@ -7508,19 +7612,13 @@ fn check_block_times() { info!("Nakamoto miner started..."); blind_signer(&naka_conf, &signers, proposals_submitted); + wait_for_first_naka_block_commit(60, &counters.naka_submitted_commits); - let epochs = naka_conf.burnchain.epochs.clone().unwrap(); - let epoch_3 = &epochs[StacksEpochId::Epoch30]; - let epoch_3_start = epoch_3.start_height; - let mut last_stacks_block_height = 0; - let mut last_tenure_height = 0; - next_block_and(&mut btc_regtest_controller, 60, || { - let info = get_chain_info_result(&naka_conf).unwrap(); - last_stacks_block_height = info.stacks_tip_height as u128; - last_tenure_height = last_stacks_block_height + 1; - Ok(info.burn_block_height == epoch_3_start) - }) - .unwrap(); + let info = get_chain_info_result(&naka_conf).unwrap(); + let mut last_stacks_block_height = info.stacks_tip_height as u128; + let mut last_tenure_height = last_stacks_block_height + 1; + + next_block_and_mine_commit(&mut btc_regtest_controller, 60, &naka_conf, &counters).unwrap(); let time0_value = call_read_only( &naka_conf, @@ -7578,16 +7676,13 @@ fn check_block_times() { Ok(stacks_block_height > last_stacks_block_height && cur_sender_nonce == sender_nonce) }) .expect("Timed out waiting for contracts to publish"); - last_stacks_block_height = stacks_block_height; // Repeat these tests for 5 tenures for _ in 0..5 { - next_block_and(&mut btc_regtest_controller, 60, || { - let info = get_chain_info_result(&naka_conf).unwrap(); - stacks_block_height = info.stacks_tip_height as u128; - Ok(stacks_block_height > last_stacks_block_height) - }) - .unwrap(); + next_block_and_mine_commit(&mut btc_regtest_controller, 60, &naka_conf, &counters).unwrap(); + let info = get_chain_info_result(&naka_conf).unwrap(); + stacks_block_height = info.stacks_tip_height as u128; + last_stacks_block_height = stacks_block_height; last_tenure_height += 1; 
info!("New tenure {last_tenure_height}, Stacks height: {last_stacks_block_height}"); @@ -8498,6 +8593,7 @@ fn check_block_info_rewards() { naka_proposed_blocks: proposals_submitted, .. } = run_loop.counters(); + let counters = run_loop.counters(); let coord_channel = run_loop.coordinator_channels(); @@ -8696,13 +8792,7 @@ fn check_block_info_rewards() { // (only 2 blocks maturation time in tests) info!("Mining 6 tenures to mature the block reward"); for i in 0..6 { - next_block_and_mine_commit( - &mut btc_regtest_controller, - 20, - &coord_channel, - &commits_submitted, - ) - .unwrap(); + next_block_and_mine_commit(&mut btc_regtest_controller, 20, &naka_conf, &counters).unwrap(); info!("Mined a block ({i})"); } @@ -9268,10 +9358,10 @@ fn v3_signer_api_endpoint() { let run_loop_stopper = run_loop.get_termination_switch(); let Counters { blocks_processed, - naka_submitted_commits: commits_submitted, naka_proposed_blocks: proposals_submitted, .. } = run_loop.counters(); + let counters = run_loop.counters(); let coord_channel = run_loop.coordinator_channels(); @@ -9332,13 +9422,7 @@ fn v3_signer_api_endpoint() { // Mine some nakamoto tenures for _i in 0..naka_tenures { - next_block_and_mine_commit( - &mut btc_regtest_controller, - 60, - &coord_channel, - &commits_submitted, - ) - .unwrap(); + next_block_and_mine_commit(&mut btc_regtest_controller, 60, &conf, &counters).unwrap(); } let block_height = btc_regtest_controller.get_headers_height(); let reward_cycle = btc_regtest_controller @@ -9444,7 +9528,7 @@ fn v3_blockbyheight_api_endpoint() { naka_proposed_blocks: proposals_submitted, .. 
} = run_loop.counters(); - + let counters = run_loop.counters(); let coord_channel = run_loop.coordinator_channels(); let run_loop_thread = thread::spawn(move || run_loop.start(None, 0)); @@ -9466,13 +9550,7 @@ fn v3_blockbyheight_api_endpoint() { wait_for_first_naka_block_commit(60, &commits_submitted); // Mine 1 nakamoto tenure - next_block_and_mine_commit( - &mut btc_regtest_controller, - 60, - &coord_channel, - &commits_submitted, - ) - .unwrap(); + next_block_and_mine_commit(&mut btc_regtest_controller, 60, &conf, &counters).unwrap(); let burnchain = conf.get_burnchain(); let sortdb = burnchain.open_sortition_db(true).unwrap(); @@ -9567,11 +9645,10 @@ fn nakamoto_lockup_events() { let run_loop_stopper = run_loop.get_termination_switch(); let Counters { blocks_processed, - naka_submitted_commits: commits_submitted, naka_proposed_blocks: proposals_submitted, .. } = run_loop.counters(); - + let counters = run_loop.counters(); let coord_channel = run_loop.coordinator_channels(); let run_loop_thread = thread::spawn(move || run_loop.start(None, 0)); @@ -9602,13 +9679,7 @@ fn nakamoto_lockup_events() { info!("------------------------- Setup finished, run test -------------------------"); - next_block_and_mine_commit( - &mut btc_regtest_controller, - 60, - &coord_channel, - &commits_submitted, - ) - .unwrap(); + next_block_and_mine_commit(&mut btc_regtest_controller, 60, &conf, &counters).unwrap(); let http_origin = format!("http://{}", &conf.node.rpc_bind); @@ -9752,6 +9823,7 @@ fn skip_mining_long_tx() { naka_mined_blocks: mined_naka_blocks, .. 
} = run_loop.counters(); + let counters = run_loop.counters(); let coord_channel = run_loop.coordinator_channels(); @@ -9790,13 +9862,7 @@ fn skip_mining_long_tx() { // Mine a few nakamoto tenures with some interim blocks in them for i in 0..5 { let mined_before = mined_naka_blocks.load(Ordering::SeqCst); - next_block_and_mine_commit( - &mut btc_regtest_controller, - 60, - &coord_channel, - &commits_submitted, - ) - .unwrap(); + next_block_and_mine_commit(&mut btc_regtest_controller, 60, &naka_conf, &counters).unwrap(); if i == 0 { // we trigger the nakamoto miner to evaluate the long running transaction, @@ -9904,17 +9970,10 @@ fn test_shadow_recovery() { let naka_conf = signer_test.running_nodes.conf.clone(); let btc_regtest_controller = &mut signer_test.running_nodes.btc_regtest_controller; - let coord_channel = signer_test.running_nodes.coord_channel.clone(); - let commits_submitted = signer_test.running_nodes.commits_submitted.clone(); + let counters = signer_test.running_nodes.counters.clone(); // make another tenure - next_block_and_mine_commit( - btc_regtest_controller, - 60, - &coord_channel, - &commits_submitted, - ) - .unwrap(); + next_block_and_mine_commit(btc_regtest_controller, 60, &naka_conf, &counters).unwrap(); let block_height = btc_regtest_controller.get_headers_height(); let reward_cycle = btc_regtest_controller @@ -9984,18 +10043,11 @@ fn test_shadow_recovery() { // revive ATC-C by waiting for commits for _i in 0..4 { - btc_regtest_controller.bootstrap_chain(1); - sleep_ms(30_000); + next_block_and_commits_only(btc_regtest_controller, 60, &naka_conf, &counters).unwrap(); } // make another tenure - next_block_and_mine_commit( - btc_regtest_controller, - 60, - &coord_channel, - &commits_submitted, - ) - .unwrap(); + next_block_and_mine_commit(btc_regtest_controller, 60, &naka_conf, &counters).unwrap(); // all shadow blocks are present and processed let mut shadow_ids = HashSet::new(); @@ -10492,7 +10544,6 @@ fn clarity_cost_spend_down() { 
.get_stacks_blocks_processed(); // Pause mining so we can add all our transactions to the mempool at once. TEST_MINE_STALL.set(true); - let mut submitted_txs = vec![]; for _nmb_tx in 0..nmb_txs_per_signer { for sender_sk in sender_sks.iter() { let sender_nonce = get_and_increment_nonce(sender_sk, &mut sender_nonces); @@ -10508,9 +10559,7 @@ fn clarity_cost_spend_down() { &[], ); match submit_tx_fallible(&http_origin, &contract_tx) { - Ok(txid) => { - submitted_txs.push(txid); - } + Ok(_txid) => {} Err(_e) => { // If we fail to submit a tx, we need to make sure we don't // increment the nonce for this sender, so we don't end up @@ -10721,8 +10770,7 @@ fn test_tenure_extend_from_flashblocks() { let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); let btc_regtest_controller = &mut signer_test.running_nodes.btc_regtest_controller; let coord_channel = signer_test.running_nodes.coord_channel.clone(); - let commits_submitted = signer_test.running_nodes.commits_submitted.clone(); - let sortitions_processed = signer_test.running_nodes.sortitions_processed.clone(); + let counters = signer_test.running_nodes.counters.clone(); let nakamoto_test_skip_commit_op = signer_test .running_nodes .nakamoto_test_skip_commit_op @@ -10742,13 +10790,7 @@ fn test_tenure_extend_from_flashblocks() { .unwrap(); for _ in 0..3 { - next_block_and_mine_commit( - btc_regtest_controller, - 60, - &coord_channel, - &commits_submitted, - ) - .unwrap(); + next_block_and_mine_commit(btc_regtest_controller, 60, &naka_conf, &counters).unwrap(); } let burn_view_contract = r#" @@ -10780,69 +10822,33 @@ fn test_tenure_extend_from_flashblocks() { ); submit_tx(&http_origin, &contract_tx); - let blocks_processed_before = coord_channel - .lock() - .expect("Mutex poisoned") - .get_stacks_blocks_processed(); - wait_for(120, || { - let blocks_processed = coord_channel - .lock() - .expect("Mutex poisoned") - .get_stacks_blocks_processed(); - Ok(blocks_processed > blocks_processed_before) + let 
sender_nonce = get_account(&naka_conf.node.data_url, &deployer_addr).nonce; + Ok(sender_nonce > 0) }) .expect("Timed out waiting for interim blocks to be mined"); - next_block_and_mine_commit( - btc_regtest_controller, - 60, - &coord_channel, - &commits_submitted, - ) - .unwrap(); + next_block_and_mine_commit(btc_regtest_controller, 60, &naka_conf, &counters).unwrap(); // stall miner and relayer - // make tenure but don't wait for a stacks block - next_block_and_commits_only( - btc_regtest_controller, - 60, - &coord_channel, - &commits_submitted, - ) - .unwrap(); + // make tenure + next_block_and_mine_commit(btc_regtest_controller, 60, &naka_conf, &counters).unwrap(); - // prevent the mienr from sending another block-commit + // prevent the miner from sending another block-commit nakamoto_test_skip_commit_op.set(true); - // make sure we get a block-found tenure change - let blocks_processed_before = coord_channel - .lock() - .expect("Mutex poisoned") - .get_stacks_blocks_processed(); - - // make sure the relayer processes both sortitions - let sortitions_processed_before = sortitions_processed.load(Ordering::SeqCst); + let info_before = get_chain_info(&naka_conf); // mine another Bitcoin block right away, since it will contain a block-commit btc_regtest_controller.bootstrap_chain(1); - wait_for(60, || { - sleep_ms(100); - let sortitions_cnt = sortitions_processed.load(Ordering::SeqCst); - Ok(sortitions_cnt > sortitions_processed_before) - }) - .unwrap(); - wait_for(120, || { - let blocks_processed = coord_channel - .lock() - .expect("Mutex poisoned") - .get_stacks_blocks_processed(); - Ok(blocks_processed > blocks_processed_before) + let info = get_chain_info(&naka_conf); + Ok(info.burn_block_height > info_before.burn_block_height + && info.stacks_tip_height > info_before.stacks_tip_height) }) - .expect("Timed out waiting for interim blocks to be mined"); + .unwrap(); let (canonical_stacks_tip_ch, _) = 
SortitionDB::get_canonical_stacks_chain_tip_hash(sortdb.conn()).unwrap(); @@ -10869,11 +10875,9 @@ fn test_tenure_extend_from_flashblocks() { // Given the above, this will be an `Extend` tenure. TEST_MINER_THREAD_STALL.set(false); - let sortitions_processed_before = sortitions_processed.load(Ordering::SeqCst); wait_for(60, || { - sleep_ms(100); - let sortitions_cnt = sortitions_processed.load(Ordering::SeqCst); - Ok(sortitions_cnt > sortitions_processed_before) + let cur_sort_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); + Ok(cur_sort_tip.block_height > sort_tip.block_height) }) .unwrap(); @@ -10951,7 +10955,6 @@ fn test_tenure_extend_from_flashblocks() { // wait for the miner directive to be processed wait_for(60, || { - sleep_ms(30_000); let directives_cnt = nakamoto_miner_directives.load(Ordering::SeqCst); Ok(directives_cnt > miner_directives_before) }) @@ -10959,7 +10962,7 @@ fn test_tenure_extend_from_flashblocks() { // wait for all of the aforementioned transactions to get mined wait_for(120, || { - // fill mempool with transactions that depend on the burn view + // check account nonces from the sent transactions for (sender_sk, account_before) in account_keys.iter().zip(accounts_before.iter()) { let sender_addr = tests::to_addr(sender_sk); let account = loop { @@ -11016,28 +11019,7 @@ fn test_tenure_extend_from_flashblocks() { } // mine one additional tenure, to verify that we're on track - let commits_before = commits_submitted.load(Ordering::SeqCst); - let node_info_before = get_chain_info_opt(&naka_conf).unwrap(); - - btc_regtest_controller.bootstrap_chain(1); - - wait_for(20, || { - Ok(commits_submitted.load(Ordering::SeqCst) > commits_before) - }) - .unwrap(); - - // there was a sortition winner - let sort_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap(); - assert!(sort_tip.sortition); - - wait_for(20, || { - let node_info = get_chain_info_opt(&naka_conf).unwrap(); - Ok( - node_info.burn_block_height > 
node_info_before.burn_block_height - && node_info.stacks_tip_height > node_info_before.stacks_tip_height, - ) - }) - .unwrap(); + next_block_and_mine_commit(btc_regtest_controller, 60, &naka_conf, &counters).unwrap(); // boot a follower. it should reach the chain tip info!("----- BEGIN FOLLOWR BOOTUP ------"); @@ -11092,9 +11074,8 @@ fn test_tenure_extend_from_flashblocks() { debug!("Booted follower-thread"); - let miner_info = get_chain_info_result(&naka_conf).unwrap(); - wait_for(300, || { + let miner_info = get_chain_info_result(&naka_conf).unwrap(); let Ok(info) = get_chain_info_result(&follower_conf) else { sleep_ms(1000); return Ok(false); diff --git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs index 2c224c8e34..6f348275e0 100644 --- a/testnet/stacks-node/src/tests/neon_integrations.rs +++ b/testnet/stacks-node/src/tests/neon_integrations.rs @@ -8464,8 +8464,6 @@ fn atlas_stress_integration_test() { panic!(); } - let mut all_zonefiles = vec![]; - // make a _ton_ of name-imports for i in 0..batches { let account_before = get_account(&http_origin, &to_addr(&user_1)); @@ -8477,8 +8475,6 @@ fn atlas_stress_integration_test() { let zonefile_hex = format!("facade00{:04x}{:04x}{:04x}", batch_size * i + j, i, j); let hashed_zonefile = Hash160::from_data(&hex_bytes(&zonefile_hex).unwrap()); - all_zonefiles.push(zonefile_hex.clone()); - let tx_3 = make_contract_call( &user_1, 2 + (batch_size * i + j) as u64, @@ -8666,8 +8662,6 @@ fn atlas_stress_integration_test() { let zonefile_hex = format!("facade01{j:04x}"); let hashed_zonefile = Hash160::from_data(&hex_bytes(&zonefile_hex).unwrap()); - all_zonefiles.push(zonefile_hex.clone()); - let tx_6 = make_contract_call( &users[batches * batch_size + j], 1, @@ -8730,8 +8724,6 @@ fn atlas_stress_integration_test() { let zonefile_hex = format!("facade02{j:04x}"); let hashed_zonefile = Hash160::from_data(&hex_bytes(&zonefile_hex).unwrap()); - 
all_zonefiles.push(zonefile_hex.clone()); - let tx_7 = make_contract_call( &users[batches * batch_size + j], 2, @@ -8793,8 +8785,6 @@ fn atlas_stress_integration_test() { let zonefile_hex = format!("facade03{j:04x}"); let hashed_zonefile = Hash160::from_data(&hex_bytes(&zonefile_hex).unwrap()); - all_zonefiles.push(zonefile_hex.clone()); - let tx_8 = make_contract_call( &users[batches * batch_size + j], 3, diff --git a/testnet/stacks-node/src/tests/signer/mod.rs b/testnet/stacks-node/src/tests/signer/mod.rs index 6ef2431a3a..6b355fe5aa 100644 --- a/testnet/stacks-node/src/tests/signer/mod.rs +++ b/testnet/stacks-node/src/tests/signer/mod.rs @@ -1,4 +1,4 @@ -// Copyright (C) 2020-2024 Stacks Open Internet Foundation +// Copyright (C) 2020-2025 Stacks Open Internet Foundation // // This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by @@ -15,20 +15,6 @@ mod v0; use std::collections::HashSet; -// Copyright (C) 2020-2024 Stacks Open Internet Foundation -// -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// This program is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU General Public License for more details. -// -// You should have received a copy of the GNU General Public License -// along with this program. If not, see . 
use std::sync::atomic::{AtomicBool, AtomicU64, Ordering}; use std::sync::{Arc, Mutex}; use std::thread; @@ -97,6 +83,7 @@ pub struct RunningNodes { pub nakamoto_blocks_signer_pushed: RunLoopCounter, pub nakamoto_miner_directives: Arc, pub nakamoto_test_skip_commit_op: TestFlag, + pub counters: Counters, pub coord_channel: Arc>, pub conf: NeonConfig, } @@ -349,15 +336,14 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest + Send + 'static, T: SignerEventTrait + 'static> SignerTest>], - commits_submitted: &[&Arc], + node_confs: &[&NeonConfig], + node_counters: &[&Counters], timeout: Duration, ) { let blocks_len = test_observer::get_blocks().len(); @@ -383,8 +369,8 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest + Send + 'static, T: SignerEventTrait + 'static> SignerTest BlockResponse { - let mut stackerdb = StackerDB::new( + let mut stackerdb = StackerDB::new_normal( &self.running_nodes.conf.node.rpc_bind, StacksPrivateKey::random(), // We are just reading so don't care what the key is false, @@ -818,7 +804,7 @@ impl + Send + 'static, T: SignerEventTrait + 'static> SignerTest( naka_signer_pushed_blocks, .. 
} = run_loop.counters(); + let counters = run_loop.counters(); let coord_channel = run_loop.coordinator_channels(); let run_loop_thread = thread::spawn(move || run_loop.start(None, 0)); @@ -984,6 +971,7 @@ fn setup_stx_btc_node( nakamoto_test_skip_commit_op, nakamoto_miner_directives: naka_miner_directives.0, coord_channel, + counters, conf: naka_conf, } } diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs index f9050644dc..dfe5c34443 100644 --- a/testnet/stacks-node/src/tests/signer/v0.rs +++ b/testnet/stacks-node/src/tests/signer/v0.rs @@ -85,8 +85,9 @@ use crate::tests::nakamoto_integrations::{ POX_4_DEFAULT_STACKER_BALANCE, POX_4_DEFAULT_STACKER_STX_AMT, }; use crate::tests::neon_integrations::{ - get_account, get_chain_info, get_chain_info_opt, get_sortition_info, get_sortition_info_ch, - next_block_and_wait, run_until_burnchain_height, submit_tx, submit_tx_fallible, test_observer, + get_account, get_chain_info, get_chain_info_opt, get_pox_info, get_sortition_info, + get_sortition_info_ch, next_block_and_wait, run_until_burnchain_height, submit_tx, + submit_tx_fallible, test_observer, }; use crate::tests::{ self, gen_random_port, make_contract_call, make_contract_publish, make_stacks_transfer, @@ -1607,10 +1608,7 @@ fn multiple_miners() { let mut run_loop_2 = boot_nakamoto::BootRunLoop::new(conf_node_2.clone()).unwrap(); let run_loop_stopper_2 = run_loop_2.get_termination_switch(); let rl2_coord_channels = run_loop_2.coordinator_channels(); - let Counters { - naka_submitted_commits: rl2_commits, - .. 
- } = run_loop_2.counters(); + let rl2_counters = run_loop_2.counters(); let run_loop_2_thread = thread::Builder::new() .name("run_loop_2".into()) .spawn(move || run_loop_2.start(None, 0)) @@ -1637,8 +1635,7 @@ fn multiple_miners() { // is that we keep track of how many tenures each miner produced, and once enough sortitions // have been produced such that each miner has produced 3 tenures, we stop and check the // results at the end - let rl1_coord_channels = signer_test.running_nodes.coord_channel.clone(); - let rl1_commits = signer_test.running_nodes.commits_submitted.clone(); + let rl1_counters = signer_test.running_nodes.counters.clone(); let miner_1_pk = StacksPublicKey::from_private(conf.miner.mining_key.as_ref().unwrap()); let miner_2_pk = StacksPublicKey::from_private(conf_node_2.miner.mining_key.as_ref().unwrap()); @@ -1657,8 +1654,8 @@ fn multiple_miners() { info!("Issue next block-build request\ninfo 1: {info_1:?}\ninfo 2: {info_2:?}\n"); signer_test.mine_block_wait_on_processing( - &[&rl1_coord_channels, &rl2_coord_channels], - &[&rl1_commits, &rl2_commits], + &[&conf, &conf_node_2], + &[&rl1_counters, &rl2_counters], Duration::from_secs(30), ); @@ -2616,6 +2613,53 @@ fn tenure_extend_after_idle_signers() { return; } + tracing_subscriber::registry() + .with(fmt::layer()) + .with(EnvFilter::from_default_env()) + .init(); + + info!("------------------------- Test Setup -------------------------"); + let num_signers = 5; + let idle_timeout = Duration::from_secs(30); + let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( + num_signers, + vec![], + |config| { + config.tenure_idle_timeout = idle_timeout; + }, + |config| { + config.miner.tenure_extend_cost_threshold = 0; + }, + None, + None, + ); + + signer_test.boot_to_epoch_3(); + + info!("---- Nakamoto booted, starting test ----"); + signer_test.mine_nakamoto_block(Duration::from_secs(30), true); + + info!("---- Waiting for a tenure extend ----"); + + // Now, wait for a block with a 
tenure extend + wait_for(idle_timeout.as_secs() + 10, || { + Ok(last_block_contains_tenure_change_tx( + TenureChangeCause::Extended, + )) + }) + .expect("Timed out waiting for a block with a tenure extend"); + + signer_test.shutdown(); +} + +#[test] +#[ignore] +/// This test verifies that a miner will include other transactions with a TenureExtend transaction. +fn tenure_extend_with_other_transactions() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + tracing_subscriber::registry() .with(fmt::layer()) .with(EnvFilter::from_default_env()) @@ -2627,7 +2671,7 @@ fn tenure_extend_after_idle_signers() { let sender_addr = tests::to_addr(&sender_sk); let send_amt = 100; let send_fee = 180; - let _recipient = PrincipalData::from(StacksAddress::burn_address(false)); + let recipient = PrincipalData::from(StacksAddress::burn_address(false)); let idle_timeout = Duration::from_secs(30); let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( num_signers, @@ -2635,24 +2679,78 @@ fn tenure_extend_after_idle_signers() { |config| { config.tenure_idle_timeout = idle_timeout; }, - |_| {}, + |config| { + config.miner.tenure_extend_cost_threshold = 0; + }, None, None, ); - let _http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); + let http_origin = format!("http://{}", &signer_test.running_nodes.conf.node.rpc_bind); signer_test.boot_to_epoch_3(); info!("---- Nakamoto booted, starting test ----"); signer_test.mine_nakamoto_block(Duration::from_secs(30), true); - info!("---- Waiting for a tenure extend ----"); + info!("Pause miner so it doesn't propose a block before the tenure extend"); + TEST_MINE_STALL.set(true); + + // Submit a transaction to be included with the tenure extend + let transfer_tx = make_stacks_transfer( + &sender_sk, + 0, + send_fee, + signer_test.running_nodes.conf.burnchain.chain_id, + &recipient, + send_amt, + ); + let _tx = submit_tx(&http_origin, &transfer_tx); + + info!("---- Wait for tenure 
extend timeout ----"); + + sleep_ms(idle_timeout.as_millis() as u64 + 1000); + + info!("---- Resume miner to propose a block with the tenure extend ----"); + TEST_MINE_STALL.set(false); // Now, wait for a block with a tenure extend wait_for(idle_timeout.as_secs() + 10, || { - Ok(last_block_contains_tenure_change_tx( - TenureChangeCause::Extended, - )) + let blocks = test_observer::get_blocks(); + let last_block = &blocks.last().unwrap(); + let transactions = last_block["transactions"].as_array().unwrap(); + let (first_tx, other_txs) = transactions.split_first().unwrap(); + let raw_tx = first_tx["raw_tx"].as_str().unwrap(); + let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); + let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); + let found_tenure_extend = match &parsed.payload { + TransactionPayload::TenureChange(payload) + if payload.cause == TenureChangeCause::Extended => + { + info!("Found tenure extend transaction: {parsed:?}"); + true + } + _ => false, + }; + if found_tenure_extend { + let found_transfer = other_txs.iter().any(|tx| { + let raw_tx = tx["raw_tx"].as_str().unwrap(); + let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); + let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); + match &parsed.payload { + TransactionPayload::TokenTransfer(..) 
=> true, + _ => false, + } + }); + if found_transfer { + info!("Found transfer transaction"); + Ok(true) + } else { + Err("No transfer transaction found together with the tenure extend".to_string()) + } + } else { + info!("No tenure change transaction found"); + Ok(false) + } }) .expect("Timed out waiting for a block with a tenure extend"); @@ -2690,6 +2788,7 @@ fn tenure_extend_after_idle_miner() { }, |config| { config.miner.tenure_timeout = miner_idle_timeout; + config.miner.tenure_extend_cost_threshold = 0; }, None, None, @@ -2766,6 +2865,7 @@ fn tenure_extend_succeeds_after_rejected_attempt() { }, |config| { config.miner.tenure_timeout = miner_idle_timeout; + config.miner.tenure_extend_cost_threshold = 0; }, None, None, @@ -2854,7 +2954,9 @@ fn stx_transfers_dont_effect_idle_timeout() { |config| { config.tenure_idle_timeout = idle_timeout; }, - |_| {}, + |config| { + config.miner.tenure_extend_cost_threshold = 0; + }, None, None, ); @@ -2988,6 +3090,7 @@ fn idle_tenure_extend_active_mining() { |config| { // accept all proposals in the node config.connection_options.block_proposal_max_age_secs = u64::MAX; + config.miner.tenure_extend_cost_threshold = 0; }, None, None, @@ -3330,7 +3433,7 @@ fn empty_sortition() { info!("------------------------- Test Delayed Block is Rejected -------------------------"); let reward_cycle = signer_test.get_current_reward_cycle(); - let mut stackerdb = StackerDB::new( + let mut stackerdb = StackerDB::new_normal( &signer_test.running_nodes.conf.node.rpc_bind, StacksPrivateKey::random(), // We are just reading so don't care what the key is false, @@ -4660,10 +4763,10 @@ fn multiple_miners_with_nakamoto_blocks() { let run_loop_stopper_2 = run_loop_2.get_termination_switch(); let rl2_coord_channels = run_loop_2.coordinator_channels(); let Counters { - naka_submitted_commits: rl2_commits, naka_mined_blocks: blocks_mined2, .. 
} = run_loop_2.counters(); + let rl2_counters = run_loop_2.counters(); let run_loop_2_thread = thread::Builder::new() .name("run_loop_2".into()) .spawn(move || run_loop_2.start(None, 0)) @@ -4690,8 +4793,7 @@ fn multiple_miners_with_nakamoto_blocks() { // is that we keep track of how many tenures each miner produced, and once enough sortitions // have been produced such that each miner has produced 3 tenures, we stop and check the // results at the end - let rl1_coord_channels = signer_test.running_nodes.coord_channel.clone(); - let rl1_commits = signer_test.running_nodes.commits_submitted.clone(); + let rl1_counters = signer_test.running_nodes.counters.clone(); let miner_1_pk = StacksPublicKey::from_private(conf.miner.mining_key.as_ref().unwrap()); let miner_2_pk = StacksPublicKey::from_private(conf_node_2.miner.mining_key.as_ref().unwrap()); @@ -4706,8 +4808,8 @@ fn multiple_miners_with_nakamoto_blocks() { let blocks_processed_before = blocks_mined1.load(Ordering::SeqCst) + blocks_mined2.load(Ordering::SeqCst); signer_test.mine_block_wait_on_processing( - &[&rl1_coord_channels, &rl2_coord_channels], - &[&rl1_commits, &rl2_commits], + &[&conf, &conf_node_2], + &[&rl1_counters, &rl2_counters], Duration::from_secs(30), ); btc_blocks_mined += 1; @@ -4934,6 +5036,8 @@ fn partial_tenure_fork() { naka_skip_commit_op: rl2_skip_commit_op, .. 
} = run_loop_2.counters(); + let rl2_counters = run_loop_2.counters(); + let rl1_counters = signer_test.running_nodes.counters.clone(); signer_test.boot_to_epoch_3(); let run_loop_2_thread = thread::Builder::new() @@ -5004,35 +5108,37 @@ fn partial_tenure_fork() { rl1_skip_commit_op.set(true); rl2_skip_commit_op.set(true); - let mined_before_1 = blocks_mined1.load(Ordering::SeqCst); - let mined_before_2 = blocks_mined2.load(Ordering::SeqCst); - let commits_before_1 = commits_1.load(Ordering::SeqCst); - let commits_before_2 = commits_2.load(Ordering::SeqCst); + let info_before = get_chain_info(&conf); // Mine the first block next_block_and( &mut signer_test.running_nodes.btc_regtest_controller, 180, || { - let mined_1 = blocks_mined1.load(Ordering::SeqCst); - let mined_2 = blocks_mined2.load(Ordering::SeqCst); - - Ok(mined_1 > mined_before_1 || mined_2 > mined_before_2) + let info_1 = get_chain_info(&conf); + Ok(info_1.stacks_tip_height > info_before.stacks_tip_height) }, ) .expect("Timed out waiting for new Stacks block to be mined"); info!("-------- Mined first block, wait for block commits --------"); + let info_before = get_chain_info(&conf); + // Unpause block commits and wait for both miners' commits rl1_skip_commit_op.set(false); rl2_skip_commit_op.set(false); - // Ensure that both block commits have been sent before continuing + // Ensure that both miners' commits point at the stacks tip wait_for(60, || { - let commits_after_1 = commits_1.load(Ordering::SeqCst); - let commits_after_2 = commits_2.load(Ordering::SeqCst); - Ok(commits_after_1 > commits_before_1 && commits_after_2 > commits_before_2) + let last_committed_1 = rl1_counters + .naka_submitted_commit_last_stacks_tip + .load(Ordering::SeqCst); + let last_committed_2 = rl2_counters + .naka_submitted_commit_last_stacks_tip + .load(Ordering::SeqCst); + Ok(last_committed_1 >= info_before.stacks_tip_height + && last_committed_2 >= info_before.stacks_tip_height) }) .expect("Timed out waiting for block 
commits"); @@ -6438,19 +6544,22 @@ fn miner_recovers_when_broadcast_block_delay_across_tenures_occurs() { /// Mine 2 empty burn blocks (simulate fast blocks scenario) /// Miner 2 proposes block N+1 with a TenureChangePayload /// Signers accept and the stacks tip advances to N+1 -/// Miner 2 proposes block N+2 with a TokenTransfer +/// Miner 2 proposes block N+2 with a TenureExtend /// Signers accept and the stacks tip advances to N+2 +/// Miner 2 proposes block N+3 with a TokenTransfer +/// Signers accept and the stacks tip advances to N+3 /// Mine an empty burn block -/// Miner 2 proposes block N+3 with a TenureExtend -/// Signers accept and the chain advances to N+3 -/// Miner 1 wins the next tenure and proposes a block N+4 with a TenureChangePayload +/// Miner 2 proposes block N+4 with a TenureExtend /// Signers accept and the chain advances to N+4 +/// Miner 1 wins the next tenure and proposes a block N+5 with a TenureChangePayload +/// Signers accept and the chain advances to N+5 /// Asserts: /// - Block N+1 contains the TenureChangePayload -/// - Block N+2 contains the TokenTransfer -/// - Block N+3 contains the TenureExtend -/// - Block N+4 contains the TenureChangePayload -/// - The stacks tip advances to N+4 +/// - Block N+2 contains the TenureExtend +/// - Block N+3 contains the TokenTransfer +/// - Block N+4 contains the TenureExtend +/// - Block N+5 contains the TenureChangePayload +/// - The stacks tip advances to N+5 #[test] #[ignore] fn continue_after_fast_block_no_sortition() { @@ -6811,7 +6920,7 @@ fn continue_after_fast_block_no_sortition() { // Allow signers to respond to proposals again TEST_REJECT_ALL_BLOCK_PROPOSAL.set(Vec::new()); - info!("------------------------- Wait for Miner B's Block N -------------------------"; + info!("------------------------- Wait for Miner B's Block N+1 -------------------------"; "blocks_processed_before_2" => %blocks_processed_before_2, "stacks_height_before" => %stacks_height_before, "nmb_old_blocks" => 
%nmb_old_blocks); @@ -6826,7 +6935,7 @@ fn continue_after_fast_block_no_sortition() { let blocks_mined1_val = blocks_mined1.load(Ordering::SeqCst); let blocks_mined2_val = blocks_mined2.load(Ordering::SeqCst); - info!("Waiting for Miner B's Block N"; + info!("Waiting for Miner B's Block N+1"; "blocks_mined1_val" => %blocks_mined1_val, "blocks_mined2_val" => %blocks_mined2_val, "stacks_height" => %stacks_height, @@ -6841,11 +6950,11 @@ fn continue_after_fast_block_no_sortition() { .expect("Timed out waiting for block to be mined and processed"); info!( - "------------------------- Verify Tenure Change Tx in Miner B's Block N -------------------------" + "------------------------- Verify Tenure Change Tx in Miner B's Block N+1 -------------------------" ); verify_last_block_contains_tenure_change_tx(TenureChangeCause::BlockFound); - info!("------------------------- Wait for Miner B's Block N+1 -------------------------"); + info!("------------------------- Wait for Miner B's Block N+2 -------------------------"); let nmb_old_blocks = test_observer::get_blocks().len(); let blocks_processed_before_2 = blocks_mined2.load(Ordering::SeqCst); @@ -6855,18 +6964,7 @@ fn continue_after_fast_block_no_sortition() { .expect("Failed to get peer info") .stacks_tip_height; - // submit a tx so that the miner will mine an extra block - let transfer_tx = make_stacks_transfer( - &sender_sk, - sender_nonce, - send_fee, - signer_test.running_nodes.conf.burnchain.chain_id, - &recipient, - send_amt, - ); - submit_tx(&http_origin, &transfer_tx); - - // wait for the tenure-extend block to be processed + // wait for the transfer block to be processed wait_for(30, || { let stacks_height = signer_test .stacks_client @@ -6881,8 +6979,12 @@ fn continue_after_fast_block_no_sortition() { }) .expect("Timed out waiting for block to be mined and processed"); + info!("------------------------- Verify Miner B's Block N+2 -------------------------"); + 
verify_last_block_contains_tenure_change_tx(TenureChangeCause::Extended); + info!("------------------------- Wait for Miner B's Block N+3 -------------------------"); + let nmb_old_blocks = test_observer::get_blocks().len(); let blocks_processed_before_2 = blocks_mined2.load(Ordering::SeqCst); let stacks_height_before = signer_test @@ -6891,22 +6993,24 @@ fn continue_after_fast_block_no_sortition() { .expect("Failed to get peer info") .stacks_tip_height; - // wait for the new block with the STX transfer to be processed + // submit a tx so that the miner will mine an extra block + let transfer_tx = make_stacks_transfer( + &sender_sk, + sender_nonce, + send_fee, + signer_test.running_nodes.conf.burnchain.chain_id, + &recipient, + send_amt, + ); + submit_tx(&http_origin, &transfer_tx); + + // wait for the transfer block to be processed wait_for(30, || { let stacks_height = signer_test .stacks_client .get_peer_info() .expect("Failed to get peer info") .stacks_tip_height; - - let blocks_mined1_val = blocks_mined1.load(Ordering::SeqCst); - let blocks_mined2_val = blocks_mined2.load(Ordering::SeqCst); - info!("Waiting for Miner B's Block N"; - "blocks_mined1_val" => %blocks_mined1_val, - "blocks_mined2_val" => %blocks_mined2_val, - "stacks_height" => %stacks_height, - "observed_blocks" => %test_observer::get_blocks().len()); - Ok( blocks_mined2.load(Ordering::SeqCst) > blocks_processed_before_2 && stacks_height > stacks_height_before @@ -6915,7 +7019,7 @@ fn continue_after_fast_block_no_sortition() { }) .expect("Timed out waiting for block to be mined and processed"); - info!("------------------------- Verify Miner B's Block N+1 -------------------------"); + info!("------------------------- Verify Miner B's Block N+3 -------------------------"); verify_last_block_contains_transfer_tx(); @@ -6932,7 +7036,7 @@ fn continue_after_fast_block_no_sortition() { .unwrap(); btc_blocks_mined += 1; - info!("------------------------- Verify Miner B's Issues a Tenure Change Extend in 
Block N+2 -------------------------"); + info!("------------------------- Verify Miner B's Issues a Tenure Change Extend in Block N+4 -------------------------"); verify_last_block_contains_tenure_change_tx(TenureChangeCause::Extended); info!("------------------------- Unpause Miner A's Block Commits -------------------------"); @@ -6967,7 +7071,7 @@ fn continue_after_fast_block_no_sortition() { assert!(tip.sortition); assert_eq!(tip.miner_pk_hash.unwrap(), mining_pkh_1); - info!("------------------------- Verify Miner A's Issued a Tenure Change in Block N+4 -------------------------"); + info!("------------------------- Verify Miner A's Issued a Tenure Change in Block N+5 -------------------------"); verify_last_block_contains_tenure_change_tx(TenureChangeCause::BlockFound); info!( @@ -7343,10 +7447,10 @@ fn multiple_miners_with_custom_chain_id() { let run_loop_stopper_2 = run_loop_2.get_termination_switch(); let rl2_coord_channels = run_loop_2.coordinator_channels(); let Counters { - naka_submitted_commits: rl2_commits, naka_mined_blocks: blocks_mined2, .. 
} = run_loop_2.counters(); + let rl2_counters = run_loop_2.counters(); let run_loop_2_thread = thread::Builder::new() .name("run_loop_2".into()) .spawn(move || run_loop_2.start(None, 0)) @@ -7373,8 +7477,7 @@ fn multiple_miners_with_custom_chain_id() { // is that we keep track of how many tenures each miner produced, and once enough sortitions // have been produced such that each miner has produced 3 tenures, we stop and check the // results at the end - let rl1_coord_channels = signer_test.running_nodes.coord_channel.clone(); - let rl1_commits = signer_test.running_nodes.commits_submitted.clone(); + let rl1_counters = signer_test.running_nodes.counters.clone(); let miner_1_pk = StacksPublicKey::from_private(conf.miner.mining_key.as_ref().unwrap()); let miner_2_pk = StacksPublicKey::from_private(conf_node_2.miner.mining_key.as_ref().unwrap()); @@ -7389,8 +7492,8 @@ fn multiple_miners_with_custom_chain_id() { let blocks_processed_before = blocks_mined1.load(Ordering::SeqCst) + blocks_mined2.load(Ordering::SeqCst); signer_test.mine_block_wait_on_processing( - &[&rl1_coord_channels, &rl2_coord_channels], - &[&rl1_commits, &rl2_commits], + &[&conf, &conf_node_2], + &[&rl1_counters, &rl2_counters], Duration::from_secs(30), ); btc_blocks_mined += 1; @@ -7792,6 +7895,114 @@ fn block_validation_response_timeout() { ); } +// Verify that the miner timeout while waiting for signers will change accordingly +// to rejections. 
+#[test] +#[ignore] +fn block_validation_check_rejection_timeout_heuristic() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + info!("------------------------- Test Setup -------------------------"); + let num_signers = 20; + let timeout = Duration::from_secs(30); + let sender_sk = Secp256k1PrivateKey::random(); + let sender_addr = tests::to_addr(&sender_sk); + let send_amt = 100; + let send_fee = 180; + + let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( + num_signers, + vec![(sender_addr, send_amt + send_fee)], + |config| { + config.block_proposal_validation_timeout = timeout; + }, + |config| { + config.miner.block_rejection_timeout_steps.clear(); + config + .miner + .block_rejection_timeout_steps + .insert(0, Duration::from_secs(123)); + config + .miner + .block_rejection_timeout_steps + .insert(10, Duration::from_secs(20)); + config + .miner + .block_rejection_timeout_steps + .insert(15, Duration::from_secs(10)); + config + .miner + .block_rejection_timeout_steps + .insert(20, Duration::from_secs(99)); + }, + None, + None, + ); + + let all_signers: Vec<_> = signer_test + .signer_stacks_private_keys + .iter() + .map(StacksPublicKey::from_private) + .collect(); + + signer_test.boot_to_epoch_3(); + + // note we just use mined nakamoto_blocks as the second block is not going to be confirmed + + let mut test_rejections = |signer_split_index: usize, expected_timeout: u64| { + let blocks_before = test_observer::get_mined_nakamoto_blocks().len(); + let (ignore_signers, reject_signers) = all_signers.split_at(signer_split_index); + + info!("------------------------- Check Rejections-based timeout with {} rejections -------------------------", reject_signers.len()); + + TEST_REJECT_ALL_BLOCK_PROPOSAL.set(reject_signers.to_vec()); + TEST_IGNORE_ALL_BLOCK_PROPOSALS.set(ignore_signers.to_vec()); + + next_block_and( + &mut signer_test.running_nodes.btc_regtest_controller, + 30, + || 
Ok(test_observer::get_mined_nakamoto_blocks().len() > blocks_before), + ) + .unwrap(); + + signer_test + .wait_for_block_rejections(timeout.as_secs(), &reject_signers) + .unwrap(); + + wait_for(60, || { + Ok(signer_test + .running_nodes + .counters + .naka_miner_current_rejections + .get() + >= reject_signers.len() as u64) + }) + .unwrap(); + assert_eq!( + signer_test + .running_nodes + .counters + .naka_miner_current_rejections_timeout_secs + .get(), + expected_timeout + ); + }; + + test_rejections(19, 123); + test_rejections(18, 20); + test_rejections(17, 10); + test_rejections(16, 99); + + // reset reject/ignore + TEST_REJECT_ALL_BLOCK_PROPOSAL.set(vec![]); + TEST_IGNORE_ALL_BLOCK_PROPOSALS.set(vec![]); + + info!("------------------------- Shutdown -------------------------"); + signer_test.shutdown(); +} + /// Test scenario: /// /// - when a signer submits a block validation request and @@ -10524,7 +10735,7 @@ fn incoming_signers_ignore_block_proposals() { .expect("Timed out waiting for a block to be mined"); let blocks_before = mined_blocks.load(Ordering::SeqCst); - let mut stackerdb = StackerDB::new( + let mut stackerdb = StackerDB::new_normal( &signer_test.running_nodes.conf.node.rpc_bind, StacksPrivateKey::random(), // We are just reading so don't care what the key is false, @@ -10699,7 +10910,7 @@ fn outgoing_signers_ignore_block_proposals() { .unwrap() .signer_signature_hash; let blocks_before = mined_blocks.load(Ordering::SeqCst); - let mut stackerdb = StackerDB::new( + let mut stackerdb = StackerDB::new_normal( &signer_test.running_nodes.conf.node.rpc_bind, StacksPrivateKey::random(), // We are just reading so don't care what the key is false, @@ -11096,7 +11307,7 @@ fn injected_signatures_are_ignored_across_boundaries() { // The first 50% of the signers are the ones that are ignoring block proposals and thus haven't sent a signature yet let forced_signer = &signer_test.signer_stacks_private_keys[ignoring_signers.len()]; - let mut stackerdb = 
StackerDB::new( + let mut stackerdb = StackerDB::new_normal( &signer_test.running_nodes.conf.node.rpc_bind, forced_signer.clone(), false, @@ -11556,6 +11767,7 @@ fn multiple_miners_empty_sortition() { naka_submitted_commits: rl2_commits, .. } = run_loop_2.counters(); + let rl2_counters = run_loop_2.counters(); let run_loop_2_thread = thread::Builder::new() .name("run_loop_2".into()) .spawn(move || run_loop_2.start(None, 0)) @@ -11592,16 +11804,16 @@ fn multiple_miners_empty_sortition() { ); submit_tx(&conf.node.data_url, &contract_tx); - let rl1_coord_channels = signer_test.running_nodes.coord_channel.clone(); let rl1_commits = signer_test.running_nodes.commits_submitted.clone(); + let rl1_counters = signer_test.running_nodes.counters.clone(); let last_sender_nonce = loop { // Mine 1 nakamoto tenures info!("Mining tenure..."); signer_test.mine_block_wait_on_processing( - &[&rl1_coord_channels, &rl2_coord_channels], - &[&rl1_commits, &rl2_commits], + &[&conf, &conf_node_2], + &[&rl1_counters, &rl2_counters], Duration::from_secs(30), ); @@ -11635,16 +11847,29 @@ fn multiple_miners_empty_sortition() { let last_active_sortition = get_sortition_info(&conf); assert!(last_active_sortition.was_sortition); + // check if we're about to cross a reward cycle boundary -- if so, we can't + // perform this test, because we can't tenure extend across the boundary + let pox_info = get_pox_info(&conf.node.data_url).unwrap(); + let blocks_until_next_cycle = pox_info.next_cycle.blocks_until_reward_phase; + if blocks_until_next_cycle == 1 { + info!("We're about to cross a reward cycle boundary, cannot perform a tenure extend here!"); + continue; + } + // lets mine a btc flash block let rl2_commits_before = rl2_commits.load(Ordering::SeqCst); let rl1_commits_before = rl1_commits.load(Ordering::SeqCst); + let info_before = get_chain_info(&conf); + signer_test .running_nodes .btc_regtest_controller .build_next_block(2); wait_for(60, || { - Ok(rl2_commits.load(Ordering::SeqCst) > 
rl2_commits_before + let info = get_chain_info(&conf); + Ok(info.burn_block_height >= 2 + info_before.burn_block_height + && rl2_commits.load(Ordering::SeqCst) > rl2_commits_before && rl1_commits.load(Ordering::SeqCst) > rl1_commits_before) }) .unwrap(); @@ -11762,16 +11987,17 @@ fn single_miner_empty_sortition() { ); submit_tx(&conf.node.data_url, &contract_tx); - let rl1_coord_channels = signer_test.running_nodes.coord_channel.clone(); let rl1_commits = signer_test.running_nodes.commits_submitted.clone(); + let rl1_counters = signer_test.running_nodes.counters.clone(); + let rl1_conf = signer_test.running_nodes.conf.clone(); for _i in 0..3 { // Mine 1 nakamoto tenures info!("Mining tenure..."); signer_test.mine_block_wait_on_processing( - &[&rl1_coord_channels], - &[&rl1_commits], + &[&rl1_conf], + &[&rl1_counters], Duration::from_secs(30), ); @@ -11807,13 +12033,16 @@ fn single_miner_empty_sortition() { // lets mine a btc flash block let rl1_commits_before = rl1_commits.load(Ordering::SeqCst); + let info_before = get_chain_info(&conf); signer_test .running_nodes .btc_regtest_controller .build_next_block(2); wait_for(60, || { - Ok(rl1_commits.load(Ordering::SeqCst) > rl1_commits_before) + let info = get_chain_info(&conf); + Ok(info.burn_block_height >= 2 + info_before.burn_block_height + && rl1_commits.load(Ordering::SeqCst) > rl1_commits_before) }) .unwrap(); @@ -12497,3 +12726,112 @@ fn allow_reorg_within_first_proposal_burn_block_timing_secs() { run_loop_2_thread.join().unwrap(); signer_test.shutdown(); } + +#[test] +#[ignore] +/// This test verifies that a miner will produce a TenureExtend transaction +/// only after it has reached the cost threshold. 
+fn tenure_extend_cost_threshold() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + let deployer_sk = Secp256k1PrivateKey::random(); + let deployer_addr = tests::to_addr(&deployer_sk); + let num_txs = 10; + let tx_fee = 10000; + let deploy_fee = 190200; + + info!("------------------------- Test Setup -------------------------"); + let num_signers = 5; + let idle_timeout = Duration::from_secs(10); + let mut signer_test: SignerTest = SignerTest::new_with_config_modifications( + num_signers, + vec![(deployer_addr, deploy_fee + tx_fee * num_txs)], + |config| { + config.tenure_idle_timeout = idle_timeout; + }, + |config| { + config.miner.tenure_extend_cost_threshold = 5; + }, + None, + None, + ); + let naka_conf = signer_test.running_nodes.conf.clone(); + let http_origin = format!("http://{}", &naka_conf.node.rpc_bind); + + signer_test.boot_to_epoch_3(); + + info!("---- Nakamoto booted, starting test ----"); + signer_test.mine_nakamoto_block(Duration::from_secs(30), true); + + info!("---- Waiting for a tenure extend ----"); + + // Now, wait for a block with a tenure extend + wait_for(idle_timeout.as_secs() + 10, || { + Ok(last_block_contains_tenure_change_tx( + TenureChangeCause::Extended, + )) + }) + .expect_err("Received a tenure extend before cost threshold was reached"); + + // Now deploy a contract and call it in order to cross the threshold. 
+ let contract_src = format!( + r#" +(define-data-var my-var uint u0) +(define-public (f) (begin {} (ok 1))) (begin (f)) + "#, + ["(var-get my-var)"; 250].join(" ") + ); + + // First, lets deploy the contract + let mut nonce = 0; + let contract_tx = make_contract_publish( + &deployer_sk, + nonce, + deploy_fee, + naka_conf.burnchain.chain_id, + "small-contract", + &contract_src, + ); + submit_tx(&http_origin, &contract_tx); + nonce += 1; + + // Wait for the contract to be included in a block + wait_for(60, || { + let account = get_account(&http_origin, &deployer_addr); + Ok(account.nonce == nonce) + }) + .expect("Contract not included in block"); + + // Ensure the tenure was not extended in that block + assert!(!last_block_contains_tenure_change_tx( + TenureChangeCause::Extended + )); + + // Now, lets call the contract a bunch of times to increase the tenure cost + for _ in 0..num_txs { + let call_tx = make_contract_call( + &deployer_sk, + nonce, + tx_fee, + naka_conf.burnchain.chain_id, + &deployer_addr, + "small-contract", + "f", + &[], + ); + submit_tx(&http_origin, &call_tx); + nonce += 1; + } + + // Now, wait for a block with a tenure extend + wait_for(idle_timeout.as_secs() + 10, || { + Ok(last_block_contains_tenure_change_tx( + TenureChangeCause::Extended, + )) + }) + .expect("Timed out waiting for a block with a tenure extend"); + + signer_test.shutdown(); +}