diff --git a/.github/workflows/bitcoin-tests.yml b/.github/workflows/bitcoin-tests.yml index 13d94438f9..b07c0dc2e4 100644 --- a/.github/workflows/bitcoin-tests.yml +++ b/.github/workflows/bitcoin-tests.yml @@ -83,7 +83,8 @@ jobs: - tests::neon_integrations::start_stop_bitcoind - tests::should_succeed_handling_malformed_and_valid_txs - tests::nakamoto_integrations::simple_neon_integration - - tests::nakamoto_integrations::flash_blocks_on_epoch_3 + # Disable this flaky test. We don't need to continue testing the Epoch 2 -> 3 transition + # - tests::nakamoto_integrations::flash_blocks_on_epoch_3_FLAKY - tests::nakamoto_integrations::mine_multiple_per_tenure_integration - tests::nakamoto_integrations::block_proposal_api_endpoint - tests::nakamoto_integrations::miner_writes_proposed_block_to_stackerdb @@ -134,6 +135,7 @@ jobs: - tests::signer::v0::block_commit_delay - tests::signer::v0::continue_after_fast_block_no_sortition - tests::signer::v0::block_validation_response_timeout + - tests::signer::v0::block_validation_check_rejection_timeout_heuristic - tests::signer::v0::block_validation_pending_table - tests::signer::v0::new_tenure_while_validating_previous_scenario - tests::signer::v0::tenure_extend_after_bad_commit diff --git a/CHANGELOG.md b/CHANGELOG.md index a4cc59812f..1f7fce479b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,15 +8,22 @@ and this project adheres to the versioning scheme outlined in the [README.md](RE ## [Unreleased] ### Added -- Add `dry_run` configuration option to `stacks-signer` config toml. Dry run mode will - run the signer binary as if it were a registered signer. Instead of broadcasting - `StackerDB` messages, it logs `INFO` messages. Other interactions with the `stacks-node` - behave normally (e.g., submitting validation requests, submitting finished blocks). A - dry run signer will error out if the supplied key is actually a registered signer. + +### Changed + +### Fixed + +## [3.1.0.0.5] + +### Added + +- Add miner configuration option `tenure_extend_cost_threshold` to specify the percentage of the tenure budget that must be spent before a time-based tenure extend is attempted ### Changed - Miner will include other transactions in blocks with tenure extend transactions (#5760) +- Add `block_rejection_timeout_steps` to miner configuration for defining rejection-based timeouts while waiting for signer responses (#5705) +- Miner will not issue a tenure extend until at least half of the block budget has been spent (#5757) ### Fixed diff --git a/stacks-signer/CHANGELOG.md b/stacks-signer/CHANGELOG.md index 398d4125b3..4eaa122f8d 100644 --- a/stacks-signer/CHANGELOG.md +++ b/stacks-signer/CHANGELOG.md @@ -11,46 +11,56 @@ and this project adheres to the versioning scheme outlined in the [README.md](RE - Increase default `block_proposal_timeout_ms` from 10 minutes to 4 hours. Until #5729 is implemented, there is no value in rejecting a late block from a miner, since a late block is better than no block at all. +## [3.1.0.0.5.0] + +### Added + +- Add `dry_run` configuration option to `stacks-signer` config toml. Dry run mode will + run the signer binary as if it were a registered signer. Instead of broadcasting + `StackerDB` messages, it logs `INFO` messages. Other interactions with the `stacks-node` + behave normally (e.g., submitting validation requests, submitting finished blocks). A + dry run signer will error out if the supplied key is actually a registered signer. 
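For reference, a minimal sketch of how the options described in these changelog entries might be set in TOML. The key names come from the entries themselves and the values mirror the defaults added in `stackslib/src/config/mod.rs` later in this diff; the exact placement (a `[miner]` table for the node options, a top-level key for the signer option) is an assumption for illustration, not copied from the repo's example configs:

```toml
# stacks-node config: new miner options
[miner]
# percentage of the tenure budget that must be spent before a
# time-based tenure extend is attempted (assumed default: 50)
tenure_extend_cost_threshold = 50
# rejection-count steps mapped to seconds to keep waiting for signer
# responses; keys must parse as unsigned integers, and a "0" step is
# required (the parsing code later in this diff panics without it)
block_rejection_timeout_steps = { "0" = 600, "10" = 300, "20" = 150, "30" = 0 }
```

```toml
# stacks-signer config: run as if registered, but only log StackerDB
# messages at INFO instead of broadcasting them
dry_run = true
```

Presumably the miner applies the timeout whose step is the largest key not exceeding the current number of rejections, per the "rejections-based timeouts" description above.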
+ ## [3.1.0.0.4.0] -## Added +### Added - When a new block proposal is received while the signer is waiting for an existing proposal to be validated, the signer will wait until the existing block is done validating before submitting the new one for validating. ([#5453](https://github.com/stacks-network/stacks-core/pull/5453)) - Introduced two new prometheus metrics: - `stacks_signer_block_validation_latencies_histogram`: the validation_time_ms reported by the node when validating a block proposal - `stacks_signer_block_response_latencies_histogram`: the "end-to-end" time it takes for the signer to issue a block response -## Changed +### Changed ## [3.1.0.0.3.0] -## Added +### Added - Introduced the `block_proposal_max_age_secs` configuration option for signers, enabling them to automatically ignore block proposals that exceed the specified age in seconds. -## Changed +### Changed - Improvements to the stale signer cleanup logic: deletes the prior signer if it has no remaining unprocessed blocks in its database - Signers now listen to new block events from the stacks node to determine whether a block has been successfully appended to the chain tip -# [3.1.0.0.2.1] +## [3.1.0.0.2.1] -## Added +### Added -## Changed +### Changed - Prevent old reward cycle signers from processing block validation response messages that do not apply to blocks from their cycle. -# [3.1.0.0.2.1] +## [3.1.0.0.2.1] -## Added +### Added -## Changed +### Changed - Prevent old reward cycle signers from processing block validation response messages that do not apply to blocks from their cycle. ## [3.1.0.0.2.0] -## Added +### Added - **SIP-029 consensus rules, activating in epoch 3.1 at block 875,000** (see [SIP-029](https://github.com/will-corcoran/sips/blob/feat/sip-029-halving-alignment/sips/sip-029/sip-029-halving-alignment.md) for details) diff --git a/stackslib/src/burnchains/bitcoin/indexer.rs b/stackslib/src/burnchains/bitcoin/indexer.rs index bddd1eedac..8f04d08a66 100644 --- a/stackslib/src/burnchains/bitcoin/indexer.rs +++ b/stackslib/src/burnchains/bitcoin/indexer.rs @@ -265,40 +265,31 @@ impl BitcoinIndexer { Ok(s) => { // Disable Nagle algorithm s.set_nodelay(true).map_err(|_e| { - test_debug!("Failed to set TCP_NODELAY: {:?}", &_e); + test_debug!("Failed to set TCP_NODELAY: {_e:?}"); btc_error::ConnectionError })?; // set timeout s.set_read_timeout(Some(Duration::from_secs(self.runtime.timeout))) .map_err(|_e| { - test_debug!("Failed to set TCP read timeout: {:?}", &_e); + test_debug!("Failed to set TCP read timeout: {_e:?}"); btc_error::ConnectionError })?; s.set_write_timeout(Some(Duration::from_secs(self.runtime.timeout))) .map_err(|_e| { - test_debug!("Failed to set TCP write timeout: {:?}", &_e); + test_debug!("Failed to set TCP write timeout: {_e:?}"); btc_error::ConnectionError })?; - match self.runtime.sock.take() { - Some(s) => { - let _ = s.shutdown(Shutdown::Both); - } - None => {} + if let Some(s_old) = self.runtime.sock.replace(s) { + let _ = s_old.shutdown(Shutdown::Both); } - - self.runtime.sock = Some(s); Ok(()) } Err(_e) => { - let s = self.runtime.sock.take(); - match s { - Some(s) => { - let _ = s.shutdown(Shutdown::Both); - } - None => {} + if let Some(s) = self.runtime.sock.take() { + let _ = s.shutdown(Shutdown::Both); } Err(btc_error::ConnectionError) } @@ -926,11 +917,8 @@ impl BitcoinIndexer { impl Drop for BitcoinIndexer { fn drop(&mut self) { - match self.runtime.sock { - Some(ref mut s) => { - let _ = s.shutdown(Shutdown::Both); - } - None => {} + if let Some(ref mut s) = 
self.runtime.sock { + let _ = s.shutdown(Shutdown::Both); } } } diff --git a/stackslib/src/burnchains/db.rs b/stackslib/src/burnchains/db.rs index cccb6874e9..f8343ae898 100644 --- a/stackslib/src/burnchains/db.rs +++ b/stackslib/src/burnchains/db.rs @@ -1193,9 +1193,10 @@ impl BurnchainDB { let ops: Vec<BlockstackOperationType> = query_rows(&self.conn, qry, args).expect("FATAL: burnchain DB query error"); for op in ops { - if let Some(_) = indexer + if indexer .find_burnchain_header_height(&op.burn_header_hash()) .expect("FATAL: burnchain DB query error") + .is_some() { // this is the op on the canonical fork return Some(op); diff --git a/stackslib/src/burnchains/tests/mod.rs b/stackslib/src/burnchains/tests/mod.rs index 5af5848ed2..2b50656df6 100644 --- a/stackslib/src/burnchains/tests/mod.rs +++ b/stackslib/src/burnchains/tests/mod.rs @@ -577,13 +577,10 @@ impl TestBurnchainBlock { pub fn patch_from_chain_tip(&mut self, parent_snapshot: &BlockSnapshot) { assert_eq!(parent_snapshot.block_height + 1, self.block_height); - for i in 0..self.txs.len() { - match self.txs[i] { - BlockstackOperationType::LeaderKeyRegister(ref mut data) => { - assert_eq!(data.block_height, self.block_height); - data.consensus_hash = parent_snapshot.consensus_hash.clone(); - } - _ => {} + for tx in self.txs.iter_mut() { + if let BlockstackOperationType::LeaderKeyRegister(ref mut data) = tx { + assert_eq!(data.block_height, self.block_height); + data.consensus_hash = parent_snapshot.consensus_hash.clone(); } } } diff --git a/stackslib/src/chainstate/nakamoto/miner.rs b/stackslib/src/chainstate/nakamoto/miner.rs index a36e52512d..d9ad1319f7 100644 --- a/stackslib/src/chainstate/nakamoto/miner.rs +++ b/stackslib/src/chainstate/nakamoto/miner.rs @@ -150,6 +150,21 @@ pub struct MinerTenureInfo<'a> { pub tenure_block_commit_opt: Option<LeaderBlockCommitOp>, } +/// Structure returned from `NakamotoBlockBuilder::build_nakamoto_block` with +/// information about the block that was built. +pub struct BlockMetadata { + /// The block that was built + pub block: NakamotoBlock, + /// The execution cost consumed so far by the current tenure + pub tenure_consumed: ExecutionCost, + /// The cost budget for the current tenure + pub tenure_budget: ExecutionCost, + /// The size of the blocks in the current tenure in bytes + pub tenure_size: u64, + /// The events emitted by the transactions included in this block + pub tx_events: Vec<TransactionEvent>, +} + impl NakamotoBlockBuilder { /// Make a block builder from genesis (testing only) pub fn new_first_block( @@ -526,7 +541,7 @@ impl NakamotoBlockBuilder { settings: BlockBuilderSettings, event_observer: Option<&dyn MemPoolEventDispatcher>, signer_bitvec_len: u16, - ) -> Result<(NakamotoBlock, ExecutionCost, u64, Vec<TransactionEvent>), Error> { + ) -> Result<BlockMetadata, Error> { let (tip_consensus_hash, tip_block_hash, tip_height) = ( parent_stacks_header.consensus_hash.clone(), parent_stacks_header.anchored_header.block_hash(), @@ -556,7 +571,7 @@ impl NakamotoBlockBuilder { builder.load_tenure_info(&mut chainstate, burn_dbconn, tenure_info.cause())?; let mut tenure_tx = builder.tenure_begin(burn_dbconn, &mut miner_tenure_info)?; - let block_limit = tenure_tx + let tenure_budget = tenure_tx .block_limit() .expect("Failed to obtain block limit from miner's block connection"); @@ -570,7 +585,7 @@ impl NakamotoBlockBuilder { (1..=100).contains(&percentage), "BUG: tenure_cost_limit_per_block_percentage: {percentage}%. 
Must be between between 1 and 100" ); - let mut remaining_limit = block_limit.clone(); + let mut remaining_limit = tenure_budget.clone(); let cost_so_far = tenure_tx.cost_so_far(); if remaining_limit.sub(&cost_so_far).is_ok() && remaining_limit.divide(100).is_ok() { remaining_limit.multiply(percentage.into()).expect( @@ -581,7 +596,7 @@ impl NakamotoBlockBuilder { "Setting soft limit for clarity cost to {percentage}% of remaining block limit"; "remaining_limit" => %remaining_limit, "cost_so_far" => %cost_so_far, - "block_limit" => %block_limit, + "block_limit" => %tenure_budget, ); soft_limit = Some(remaining_limit); }; @@ -630,13 +645,13 @@ impl NakamotoBlockBuilder { // save the block so we can build microblocks off of it let block = builder.mine_nakamoto_block(&mut tenure_tx); - let size = builder.bytes_so_far; - let consumed = builder.tenure_finish(tenure_tx)?; + let tenure_size = builder.bytes_so_far; + let tenure_consumed = builder.tenure_finish(tenure_tx)?; let ts_end = get_epoch_time_ms(); set_last_mined_block_transaction_count(block.txs.len() as u64); - set_last_mined_execution_cost_observed(&consumed, &block_limit); + set_last_mined_execution_cost_observed(&tenure_consumed, &tenure_budget); info!( "Miner: mined Nakamoto block"; @@ -645,14 +660,20 @@ impl NakamotoBlockBuilder { "height" => block.header.chain_length, "tx_count" => block.txs.len(), "parent_block_id" => %block.header.parent_block_id, - "block_size" => size, - "execution_consumed" => %consumed, - "percent_full" => block_limit.proportion_largest_dimension(&consumed), + "block_size" => tenure_size, + "execution_consumed" => %tenure_consumed, + "percent_full" => tenure_budget.proportion_largest_dimension(&tenure_consumed), "assembly_time_ms" => ts_end.saturating_sub(ts_start), "consensus_hash" => %block.header.consensus_hash ); - Ok((block, consumed, size, tx_events)) + Ok(BlockMetadata { + block, + tenure_consumed, + tenure_budget, + tenure_size, + tx_events, + }) } pub fn get_bytes_so_far(&self) -> u64 { diff --git a/stackslib/src/chainstate/nakamoto/mod.rs b/stackslib/src/chainstate/nakamoto/mod.rs index 85bb5924aa..056bd53fe4 100644 --- a/stackslib/src/chainstate/nakamoto/mod.rs +++ b/stackslib/src/chainstate/nakamoto/mod.rs @@ -2469,7 +2469,7 @@ impl NakamotoChainState { ) -> Result { test_debug!("Consider Nakamoto block {}", &block.block_id()); // do nothing if we already have this block - if let Some(_) = Self::get_block_header(headers_conn, &block.header.block_id())? { + if Self::get_block_header(headers_conn, &block.header.block_id())?.is_some() { debug!("Already have block {}", &block.header.block_id()); return Ok(false); } diff --git a/stackslib/src/chainstate/stacks/block.rs b/stackslib/src/chainstate/stacks/block.rs index 725474afdd..f9741d060e 100644 --- a/stackslib/src/chainstate/stacks/block.rs +++ b/stackslib/src/chainstate/stacks/block.rs @@ -353,16 +353,13 @@ impl StacksMessageCodec for StacksBlock { // must be only one coinbase let mut coinbase_count = 0; for tx in txs.iter() { - match tx.payload { - TransactionPayload::Coinbase(..) => { - coinbase_count += 1; - if coinbase_count > 1 { - return Err(codec_error::DeserializeError( - "Invalid block: multiple coinbases found".to_string(), - )); - } + if let TransactionPayload::Coinbase(..) 
= tx.payload { + coinbase_count += 1; + if coinbase_count > 1 { + return Err(codec_error::DeserializeError( + "Invalid block: multiple coinbases found".to_string(), + )); } - _ => {} } } @@ -515,26 +512,23 @@ impl StacksBlock { let mut found_coinbase = false; let mut coinbase_index = 0; for (i, tx) in txs.iter().enumerate() { - match tx.payload { - TransactionPayload::Coinbase(..) => { - if !check_present { - warn!("Found unexpected coinbase tx {}", tx.txid()); - return false; - } - - if found_coinbase { - warn!("Found duplicate coinbase tx {}", tx.txid()); - return false; - } - - if tx.anchor_mode != TransactionAnchorMode::OnChainOnly { - warn!("Invalid coinbase tx {}: not on-chain only", tx.txid()); - return false; - } - found_coinbase = true; - coinbase_index = i; + if let TransactionPayload::Coinbase(..) = tx.payload { + if !check_present { + warn!("Found unexpected coinbase tx {}", tx.txid()); + return false; + } + + if found_coinbase { + warn!("Found duplicate coinbase tx {}", tx.txid()); + return false; + } + + if tx.anchor_mode != TransactionAnchorMode::OnChainOnly { + warn!("Invalid coinbase tx {}: not on-chain only", tx.txid()); + return false; } - _ => {} + found_coinbase = true; + coinbase_index = i; } } diff --git a/stackslib/src/chainstate/stacks/boot/contract_tests.rs b/stackslib/src/chainstate/stacks/boot/contract_tests.rs index 9387c02bff..2d88cfe234 100644 --- a/stackslib/src/chainstate/stacks/boot/contract_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/contract_tests.rs @@ -486,7 +486,7 @@ impl BurnStateDB for TestSimBurnStateDB { height: u32, sortition_id: &SortitionId, ) -> Option<(Vec, u128)> { - if let Some(_) = self.get_burn_header_hash(height, sortition_id) { + if self.get_burn_header_hash(height, sortition_id).is_some() { let first_block = self.get_burn_start_height(); let prepare_len = self.get_pox_prepare_length(); let rc_len = self.get_pox_reward_cycle_length(); diff --git a/stackslib/src/chainstate/stacks/boot/pox_2_tests.rs b/stackslib/src/chainstate/stacks/boot/pox_2_tests.rs index 67485301ad..ff5be1d0e5 100644 --- a/stackslib/src/chainstate/stacks/boot/pox_2_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/pox_2_tests.rs @@ -1371,23 +1371,20 @@ fn test_simple_pox_2_auto_unlock(alice_first: bool) { coinbase_txs.push(r); continue; } - match r.transaction { - TransactionOrigin::Stacks(ref t) => { - let addr = t.auth.origin().address_testnet(); - eprintln!("TX addr: {}", addr); - if addr == alice_address { - alice_txs.insert(t.auth.get_origin_nonce(), r); - } else if addr == bob_address { - bob_txs.insert(t.auth.get_origin_nonce(), r); - } else if addr == charlie_address { - assert!( - r.execution_cost != ExecutionCost::ZERO, - "Execution cost is not zero!" - ); - charlie_txs.insert(t.auth.get_origin_nonce(), r); - } + if let TransactionOrigin::Stacks(ref t) = r.transaction { + let addr = t.auth.origin().address_testnet(); + eprintln!("TX addr: {}", addr); + if addr == alice_address { + alice_txs.insert(t.auth.get_origin_nonce(), r); + } else if addr == bob_address { + bob_txs.insert(t.auth.get_origin_nonce(), r); + } else if addr == charlie_address { + assert!( + r.execution_cost != ExecutionCost::ZERO, + "Execution cost is not zero!" 
+ ); + charlie_txs.insert(t.auth.get_origin_nonce(), r); } - _ => {} } } } diff --git a/stackslib/src/chainstate/stacks/boot/pox_3_tests.rs b/stackslib/src/chainstate/stacks/boot/pox_3_tests.rs index 8e81c2965b..136559a195 100644 --- a/stackslib/src/chainstate/stacks/boot/pox_3_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/pox_3_tests.rs @@ -930,16 +930,13 @@ fn pox_auto_unlock(alice_first: bool) { coinbase_txs.push(r); continue; } - match r.transaction { - TransactionOrigin::Stacks(ref t) => { - let addr = t.auth.origin().address_testnet(); - if addr == alice_address { - alice_txs.insert(t.auth.get_origin_nonce(), r); - } else if addr == bob_address { - bob_txs.insert(t.auth.get_origin_nonce(), r); - } + if let TransactionOrigin::Stacks(ref t) = r.transaction { + let addr = t.auth.origin().address_testnet(); + if addr == alice_address { + alice_txs.insert(t.auth.get_origin_nonce(), r); + } else if addr == bob_address { + bob_txs.insert(t.auth.get_origin_nonce(), r); } - _ => {} } } } diff --git a/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs b/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs index 41b7c108d8..392c6b2cd1 100644 --- a/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs +++ b/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs @@ -9570,16 +9570,13 @@ fn missed_slots_no_unlock() { coinbase_txs.push(r); continue; } - match r.transaction { - TransactionOrigin::Stacks(ref t) => { - let addr = t.auth.origin().address_testnet(); - if addr == alice_address { - alice_txs.insert(t.auth.get_origin_nonce(), r); - } else if addr == bob_address { - bob_txs.insert(t.auth.get_origin_nonce(), r); - } + if let TransactionOrigin::Stacks(ref t) = r.transaction { + let addr = t.auth.origin().address_testnet(); + if addr == alice_address { + alice_txs.insert(t.auth.get_origin_nonce(), r); + } else if addr == bob_address { + bob_txs.insert(t.auth.get_origin_nonce(), r); } - _ => {} } } } diff --git a/stackslib/src/chainstate/stacks/db/blocks.rs b/stackslib/src/chainstate/stacks/db/blocks.rs index 0282e01427..8e6c0da9de 100644 --- a/stackslib/src/chainstate/stacks/db/blocks.rs +++ b/stackslib/src/chainstate/stacks/db/blocks.rs @@ -5137,7 +5137,7 @@ impl StacksChainState { ) { Ok(miner_rewards_opt) => miner_rewards_opt, Err(e) => { - if let Some(_) = miner_id_opt { + if miner_id_opt.is_some() { return Err(e); } else { let msg = format!("Failed to load miner rewards: {:?}", &e); @@ -11196,15 +11196,12 @@ pub mod test { let (_, burn_header_hash, consensus_hash) = peer.next_burnchain_block(burn_ops.clone()); - match (stacks_block_opt, microblocks_opt) { - (Some(stacks_block), Some(microblocks)) => { - peer.process_stacks_epoch_at_tip(&stacks_block, µblocks); - last_block_id = StacksBlockHeader::make_index_block_hash( - &consensus_hash, - &stacks_block.block_hash(), - ); - } - _ => {} + if let (Some(stacks_block), Some(microblocks)) = (stacks_block_opt, microblocks_opt) { + peer.process_stacks_epoch_at_tip(&stacks_block, µblocks); + last_block_id = StacksBlockHeader::make_index_block_hash( + &consensus_hash, + &stacks_block.block_hash(), + ); } let tip = @@ -11879,15 +11876,12 @@ pub mod test { let (_, burn_header_hash, consensus_hash) = peer.next_burnchain_block(burn_ops.clone()); - match (stacks_block_opt, microblocks_opt) { - (Some(stacks_block), Some(microblocks)) => { - peer.process_stacks_epoch_at_tip(&stacks_block, µblocks); - last_block_id = StacksBlockHeader::make_index_block_hash( - &consensus_hash, - &stacks_block.block_hash(), - ); - } - _ => {} + if let (Some(stacks_block), 
Some(microblocks)) = (stacks_block_opt, microblocks_opt) { + peer.process_stacks_epoch_at_tip(&stacks_block, µblocks); + last_block_id = StacksBlockHeader::make_index_block_hash( + &consensus_hash, + &stacks_block.block_hash(), + ); } let tip = diff --git a/stackslib/src/chainstate/stacks/db/mod.rs b/stackslib/src/chainstate/stacks/db/mod.rs index 6e543be3f1..14fece138e 100644 --- a/stackslib/src/chainstate/stacks/db/mod.rs +++ b/stackslib/src/chainstate/stacks/db/mod.rs @@ -1844,10 +1844,7 @@ impl StacksChainState { let nakamoto_staging_blocks_conn = StacksChainState::open_nakamoto_staging_blocks(&nakamoto_staging_blocks_path, true)?; - let init_required = match fs::metadata(&clarity_state_index_marf) { - Ok(_) => false, - Err(_) => true, - }; + let init_required = fs::metadata(&clarity_state_index_marf).is_err(); let state_index = StacksChainState::open_db(mainnet, chain_id, &header_index_root)?; @@ -2754,11 +2751,8 @@ pub mod test { balances: Vec<(StacksAddress, u64)>, ) -> StacksChainState { let path = chainstate_path(test_name); - match fs::metadata(&path) { - Ok(_) => { - fs::remove_dir_all(&path).unwrap(); - } - Err(_) => {} + if fs::metadata(&path).is_ok() { + fs::remove_dir_all(&path).unwrap(); }; let initial_balances = balances @@ -2874,11 +2868,8 @@ pub mod test { }; let path = chainstate_path(function_name!()); - match fs::metadata(&path) { - Ok(_) => { - fs::remove_dir_all(&path).unwrap(); - } - Err(_) => {} + if fs::metadata(&path).is_ok() { + fs::remove_dir_all(&path).unwrap(); }; let mut chainstate = @@ -2964,11 +2955,8 @@ pub mod test { }; let path = chainstate_path(function_name!()); - match fs::metadata(&path) { - Ok(_) => { - fs::remove_dir_all(&path).unwrap(); - } - Err(_) => {} + if fs::metadata(&path).is_ok() { + fs::remove_dir_all(&path).unwrap(); }; let mut chainstate = diff --git a/stackslib/src/chainstate/stacks/index/cache.rs b/stackslib/src/chainstate/stacks/index/cache.rs index c5b5323f39..1a1e9673ae 100644 --- a/stackslib/src/chainstate/stacks/index/cache.rs +++ b/stackslib/src/chainstate/stacks/index/cache.rs @@ -258,12 +258,11 @@ impl TrieCache { TrieCache::Everything(ref mut state) => { state.store_node_and_hash(block_id, trieptr, node, hash); } - TrieCache::Node256(ref mut state) => match node { - TrieNodeType::Node256(data) => { + TrieCache::Node256(ref mut state) => { + if let TrieNodeType::Node256(data) = node { state.store_node_and_hash(block_id, trieptr, TrieNodeType::Node256(data), hash); } - _ => {} - }, + } } } @@ -273,12 +272,11 @@ impl TrieCache { match self { TrieCache::Noop(_) => {} TrieCache::Everything(ref mut state) => state.store_node(block_id, trieptr, node), - TrieCache::Node256(ref mut state) => match node { - TrieNodeType::Node256(data) => { + TrieCache::Node256(ref mut state) => { + if let TrieNodeType::Node256(data) = node { state.store_node(block_id, trieptr, TrieNodeType::Node256(data)) } - _ => {} - }, + } } } diff --git a/stackslib/src/chainstate/stacks/index/file.rs b/stackslib/src/chainstate/stacks/index/file.rs index 5a7da69e52..52f571aa1f 100644 --- a/stackslib/src/chainstate/stacks/index/file.rs +++ b/stackslib/src/chainstate/stacks/index/file.rs @@ -194,11 +194,8 @@ impl TrieFile { .map(|stat| Some(stat.len())) .unwrap_or(None); - match (size_before_opt, size_after_opt) { - (Some(sz_before), Some(sz_after)) => { - debug!("Shrank DB from {} to {} bytes", sz_before, sz_after); - } - _ => {} + if let (Some(sz_before), Some(sz_after)) = (size_before_opt, size_after_opt) { + debug!("Shrank DB from {} to {} bytes", sz_before, 
sz_after); } Ok(()) @@ -213,7 +210,7 @@ impl TrieFile { let mut set_sqlite_tmpdir = false; let mut old_tmpdir_opt = None; if let Some(parent_path) = Path::new(db_path).parent() { - if let Err(_) = env::var("SQLITE_TMPDIR") { + if env::var("SQLITE_TMPDIR").is_err() { debug!( "Sqlite will store temporary migration state in '{}'", parent_path.display() ) @@ -461,11 +458,8 @@ impl TrieFile { self.write_all(buf)?; self.flush()?; - match self { - TrieFile::Disk(ref mut data) => { - data.fd.sync_data()?; - } - _ => {} + if let TrieFile::Disk(ref mut data) = self { + data.fd.sync_data()?; } Ok(offset) } diff --git a/stackslib/src/chainstate/stacks/index/marf.rs b/stackslib/src/chainstate/stacks/index/marf.rs index 7bec45bdfe..c3873d4cf9 100644 --- a/stackslib/src/chainstate/stacks/index/marf.rs +++ b/stackslib/src/chainstate/stacks/index/marf.rs @@ -1269,9 +1269,8 @@ impl<T: MarfTrieId> MARF<T> { // used in testing in order to short-circuit block-height lookups // when the trie struct is tested outside of marf.rs usage if height == 0 { - match storage.test_genesis_block { - Some(ref s) => return Ok(Some(s.clone())), - _ => {} + if let Some(ref s) = storage.test_genesis_block { + return Ok(Some(s.clone())); } } } diff --git a/stackslib/src/chainstate/stacks/index/proofs.rs b/stackslib/src/chainstate/stacks/index/proofs.rs index d66c1b72d3..2e53eca44a 100644 --- a/stackslib/src/chainstate/stacks/index/proofs.rs +++ b/stackslib/src/chainstate/stacks/index/proofs.rs @@ -1210,12 +1210,12 @@ impl<T: MarfTrieId> TrieMerkleProof<T> { }; // next proof item should be part of a segment proof - match proof[i] { - TrieMerkleProofType::Shunt(_) => { - test_debug!("Malformed proof -- exepcted segment proof following first shunt proof head at {}", i); - return false; - } - _ => {} + if let TrieMerkleProofType::Shunt(_) = proof[i] { + test_debug!( + "Malformed proof -- expected segment proof following first shunt proof head at {}", + i + ); + return false; } while i < proof.len() { diff --git a/stackslib/src/chainstate/stacks/index/storage.rs b/stackslib/src/chainstate/stacks/index/storage.rs index db99f8004e..efc19b0afb 100644 --- a/stackslib/src/chainstate/stacks/index/storage.rs +++ b/stackslib/src/chainstate/stacks/index/storage.rs @@ -1883,9 +1883,8 @@ impl<'a, T: MarfTrieId> TrieStorageTransaction<'a, T> { // blow away db trie_sql::clear_tables(self.sqlite_tx())?; - match self.data.uncommitted_writes { - Some((_, ref mut trie_storage)) => trie_storage.format()?, - None => {} + if let Some((_, ref mut trie_storage)) = self.data.uncommitted_writes { + trie_storage.format()? 
}; self.data.set_block(T::sentinel(), None); diff --git a/stackslib/src/chainstate/stacks/index/test/marf.rs b/stackslib/src/chainstate/stacks/index/test/marf.rs index ec099ef7cd..0df76cec4a 100644 --- a/stackslib/src/chainstate/stacks/index/test/marf.rs +++ b/stackslib/src/chainstate/stacks/index/test/marf.rs @@ -1270,11 +1270,8 @@ fn marf_insert_random_10485760_4096_file_storage() { } let path = "/tmp/rust_marf_insert_random_10485760_4096_file_storage".to_string(); - match fs::metadata(&path) { - Ok(_) => { - fs::remove_dir_all(&path).unwrap(); - } - Err(_) => {} + if fs::metadata(&path).is_ok() { + fs::remove_dir_all(&path).unwrap(); }; let marf_opts = MARFOpenOpts::default(); let f = TrieFileStorage::open(&path, marf_opts).unwrap(); @@ -1549,12 +1546,9 @@ fn marf_read_random_1048576_4096_file_storage() { for marf_opts in MARFOpenOpts::all().into_iter() { test_debug!("With {:?}", &marf_opts); let path = "/tmp/rust_marf_insert_random_1048576_4096_file_storage".to_string(); - match fs::metadata(&path) { - Err(_) => { - eprintln!("Run the marf_insert_random_1048576_4096_file_storage test first"); - return; - } - Ok(_) => {} + if fs::metadata(&path).is_err() { + eprintln!("Run the marf_insert_random_1048576_4096_file_storage test first"); + return; }; let marf_opts = MARFOpenOpts::default(); let mut f_store = TrieFileStorage::new_memory(marf_opts).unwrap(); @@ -2166,7 +2160,7 @@ fn test_marf_begin_from_sentinel_twice() { #[test] fn test_marf_unconfirmed() { let marf_path = "/tmp/test_marf_unconfirmed"; - if let Ok(_) = std::fs::metadata(marf_path) { + if std::fs::metadata(marf_path).is_ok() { std::fs::remove_file(marf_path).unwrap(); } let marf_opts = MARFOpenOpts::default(); diff --git a/stackslib/src/chainstate/stacks/miner.rs b/stackslib/src/chainstate/stacks/miner.rs index 9e661e4460..9e5fd383b9 100644 --- a/stackslib/src/chainstate/stacks/miner.rs +++ b/stackslib/src/chainstate/stacks/miner.rs @@ -1137,24 +1137,20 @@ impl<'a> StacksMicroblockBuilder<'a> { TransactionResult::Skipped(TransactionSkipped { error, .. }) | TransactionResult::ProcessingError(TransactionError { error, .. }) => { test_debug!("Exclude tx {} from microblock", tx.txid()); - match &error { - Error::BlockTooBigError => { - // done mining -- our execution budget is exceeded. - // Make the block from the transactions we did manage to get - test_debug!("Block budget exceeded on tx {}", &tx.txid()); - if block_limit_hit == BlockLimitFunction::NO_LIMIT_HIT { - test_debug!("Switch to mining stx-transfers only"); - block_limit_hit = BlockLimitFunction::CONTRACT_LIMIT_HIT; - } else if block_limit_hit - == BlockLimitFunction::CONTRACT_LIMIT_HIT - { - test_debug!( - "Stop mining microblock block due to limit exceeded" - ); - break; - } + if let Error::BlockTooBigError = &error { + // done mining -- our execution budget is exceeded. 
+ // Make the block from the transactions we did manage to get + test_debug!("Block budget exceeded on tx {}", &tx.txid()); + if block_limit_hit == BlockLimitFunction::NO_LIMIT_HIT { + test_debug!("Switch to mining stx-transfers only"); + block_limit_hit = BlockLimitFunction::CONTRACT_LIMIT_HIT; + } else if block_limit_hit == BlockLimitFunction::CONTRACT_LIMIT_HIT + { + test_debug!( + "Stop mining microblock block due to limit exceeded" + ); + break; } - _ => {} } continue; } @@ -1188,12 +1184,9 @@ impl<'a> StacksMicroblockBuilder<'a> { self.runtime.considered.replace(considered); self.runtime.num_mined = num_txs; - match result { - Err(e) => { - warn!("Error producing microblock: {}", e); - return Err(e); - } - _ => {} + if let Err(e) = result { + warn!("Error producing microblock: {}", e); + return Err(e); } return self.make_next_microblock(txs_included, miner_key, tx_events, None); diff --git a/stackslib/src/chainstate/stacks/mod.rs b/stackslib/src/chainstate/stacks/mod.rs index a16fbeddf4..f82da31499 100644 --- a/stackslib/src/chainstate/stacks/mod.rs +++ b/stackslib/src/chainstate/stacks/mod.rs @@ -1568,11 +1568,8 @@ pub mod test { } for tx in all_txs.into_iter() { - match tx.payload { - TransactionPayload::Coinbase(..) => { - continue; - } - _ => {} + if let TransactionPayload::Coinbase(..) = tx.payload { + continue; } txs_anchored.push(tx); if txs_anchored.len() >= num_txs { diff --git a/stackslib/src/chainstate/stacks/tests/mod.rs b/stackslib/src/chainstate/stacks/tests/mod.rs index 28595d4e19..259e2bf949 100644 --- a/stackslib/src/chainstate/stacks/tests/mod.rs +++ b/stackslib/src/chainstate/stacks/tests/mod.rs @@ -338,11 +338,8 @@ impl TestStacksNode { panic!("Tried to fork an unforkable chainstate instance"); } - match fs::metadata(&chainstate_path(new_test_name)) { - Ok(_) => { - fs::remove_dir_all(&chainstate_path(new_test_name)).unwrap(); - } - Err(_) => {} + if fs::metadata(&chainstate_path(new_test_name)).is_ok() { + fs::remove_dir_all(&chainstate_path(new_test_name)).unwrap(); } copy_dir( @@ -524,21 +521,14 @@ impl TestStacksNode { fork_tip: &BlockSnapshot, miner: &TestMiner, ) -> Option { - for commit_op in miner.block_commits.iter().rev() { - match SortitionDB::get_block_snapshot_for_winning_stacks_block( + miner.block_commits.iter().rev().find_map(|commit_op| { + SortitionDB::get_block_snapshot_for_winning_stacks_block( ic, &fork_tip.sortition_id, &commit_op.block_header_hash, ) .unwrap() - { - Some(sn) => { - return Some(sn); - } - None => {} - } - } - return None; + }) } pub fn get_miner_balance(clarity_tx: &mut ClarityTx, addr: &StacksAddress) -> u128 { @@ -1404,11 +1394,8 @@ pub fn instantiate_and_exec( post_flight_callback: Option>, ) -> StacksChainState { let path = chainstate_path(test_name); - match fs::metadata(&path) { - Ok(_) => { - fs::remove_dir_all(&path).unwrap(); - } - Err(_) => {} + if fs::metadata(&path).is_ok() { + fs::remove_dir_all(&path).unwrap(); }; let initial_balances = balances diff --git a/stackslib/src/chainstate/stacks/transaction.rs b/stackslib/src/chainstate/stacks/transaction.rs index 1a08d629e8..2ecddd947a 100644 --- a/stackslib/src/chainstate/stacks/transaction.rs +++ b/stackslib/src/chainstate/stacks/transaction.rs @@ -1130,17 +1130,14 @@ impl StacksTransactionSigner { } pub fn sign_sponsor(&mut self, privk: &StacksPrivateKey) -> Result<(), net_error> { - match self.tx.auth { - TransactionAuth::Sponsored(_, ref sponsor_condition) => { - if self.check_oversign - && sponsor_condition.num_signatures() >= 
sponsor_condition.signatures_required() - { - return Err(net_error::SigningError( - "Sponsor would have too many signatures".to_string(), - )); - } + if let TransactionAuth::Sponsored(_, ref sponsor_condition) = self.tx.auth { + if self.check_oversign + && sponsor_condition.num_signatures() >= sponsor_condition.signatures_required() + { + return Err(net_error::SigningError( + "Sponsor would have too many signatures".to_string(), + )); } - _ => {} } let next_sighash = self.tx.sign_next_sponsor(&self.sighash, privk)?; @@ -1930,24 +1927,21 @@ mod test { // test_debug!("mutate byte {}", &i); let mut cursor = io::Cursor::new(&tx_bytes); let mut reader = LogReader::from_reader(&mut cursor); - match StacksTransaction::consensus_deserialize(&mut reader) { - Ok(corrupt_tx) => { - let mut corrupt_tx_bytes = vec![]; - corrupt_tx - .consensus_serialize(&mut corrupt_tx_bytes) - .unwrap(); - if corrupt_tx_bytes.len() < tx_bytes.len() { - // didn't parse fully; the block-parsing logic would reject this block. - tx_bytes[i] = next_byte as u8; - continue; - } - if corrupt_tx.verify().is_ok() && corrupt_tx != *signed_tx { - eprintln!("corrupt tx: {:#?}", &corrupt_tx); - eprintln!("signed tx: {:#?}", &signed_tx); - assert!(false); - } + if let Ok(corrupt_tx) = StacksTransaction::consensus_deserialize(&mut reader) { + let mut corrupt_tx_bytes = vec![]; + corrupt_tx + .consensus_serialize(&mut corrupt_tx_bytes) + .unwrap(); + if corrupt_tx_bytes.len() < tx_bytes.len() { + // didn't parse fully; the block-parsing logic would reject this block. + tx_bytes[i] = next_byte as u8; + continue; + } + if corrupt_tx.verify().is_ok() && corrupt_tx != *signed_tx { + eprintln!("corrupt tx: {:#?}", &corrupt_tx); + eprintln!("signed tx: {:#?}", &signed_tx); + assert!(false); } - Err(_) => {} } // restore tx_bytes[i] = next_byte as u8; diff --git a/stackslib/src/clarity_cli.rs b/stackslib/src/clarity_cli.rs index 32ef034098..8e11ff613c 100644 --- a/stackslib/src/clarity_cli.rs +++ b/stackslib/src/clarity_cli.rs @@ -642,7 +642,7 @@ impl HeadersDB for CLIHeadersDB { ) -> Option { // mock it let conn = self.conn(); - if let Some(_) = get_cli_block_height(conn, id_bhh) { + if get_cli_block_height(conn, id_bhh).is_some() { let hash_bytes = Sha512Trunc256Sum::from_data(&id_bhh.0); Some(BurnchainHeaderHash(hash_bytes.0)) } else { @@ -657,7 +657,7 @@ impl HeadersDB for CLIHeadersDB { ) -> Option { // mock it let conn = self.conn(); - if let Some(_) = get_cli_block_height(conn, id_bhh) { + if get_cli_block_height(conn, id_bhh).is_some() { let hash_bytes = Hash160::from_data(&id_bhh.0); Some(ConsensusHash(hash_bytes.0)) } else { @@ -671,7 +671,7 @@ impl HeadersDB for CLIHeadersDB { _epoch: &StacksEpochId, ) -> Option { let conn = self.conn(); - if let Some(_) = get_cli_block_height(conn, id_bhh) { + if get_cli_block_height(conn, id_bhh).is_some() { // mock it, but make it unique let hash_bytes = Sha512Trunc256Sum::from_data(&id_bhh.0); let hash_bytes_2 = Sha512Trunc256Sum::from_data(&hash_bytes.0); @@ -687,7 +687,7 @@ impl HeadersDB for CLIHeadersDB { _epoch: &StacksEpochId, ) -> Option { let conn = self.conn(); - if let Some(_) = get_cli_block_height(conn, id_bhh) { + if get_cli_block_height(conn, id_bhh).is_some() { // mock it, but make it unique let hash_bytes = Sha512Trunc256Sum::from_data(&id_bhh.0); let hash_bytes_2 = Sha512Trunc256Sum::from_data(&hash_bytes.0); diff --git a/stackslib/src/clarity_vm/database/mod.rs b/stackslib/src/clarity_vm/database/mod.rs index 6f770f5927..e03149dba4 100644 --- 
a/stackslib/src/clarity_vm/database/mod.rs +++ b/stackslib/src/clarity_vm/database/mod.rs @@ -737,13 +737,15 @@ fn get_first_block_in_tenure( } } None => { - if let Some(_) = get_stacks_header_column_from_table( + if get_stacks_header_column_from_table( conn.conn(), id_bhh, "consensus_hash", &|r| ConsensusHash::from_row(r).expect("FATAL: malformed consensus_hash"), false, - ) { + ) + .is_some() + { return id_bhh.clone().into(); } else { get_stacks_header_column_from_table( diff --git a/stackslib/src/cli.rs b/stackslib/src/cli.rs index 286e7f1854..f43812f2ba 100644 --- a/stackslib/src/cli.rs +++ b/stackslib/src/cli.rs @@ -40,7 +40,7 @@ use crate::chainstate::burn::db::sortdb::{ }; use crate::chainstate::burn::{BlockSnapshot, ConsensusHash}; use crate::chainstate::coordinator::OnChainRewardSetProvider; -use crate::chainstate::nakamoto::miner::{NakamotoBlockBuilder, NakamotoTenureInfo}; +use crate::chainstate::nakamoto::miner::{BlockMetadata, NakamotoBlockBuilder, NakamotoTenureInfo}; use crate::chainstate::nakamoto::{NakamotoBlock, NakamotoChainState}; use crate::chainstate::stacks::db::blocks::StagingBlock; use crate::chainstate::stacks::db::{StacksBlockHeaderTypes, StacksChainState, StacksHeaderInfo}; @@ -504,7 +504,21 @@ pub fn command_try_mine(argv: &[String], conf: Option<&Config>) { None, 0, ) - .map(|(block, cost, size, _)| (block.header.block_hash(), block.txs, cost, size)) + .map( + |BlockMetadata { + block, + tenure_consumed, + tenure_size, + .. + }| { + ( + block.header.block_hash(), + block.txs, + tenure_consumed, + tenure_size, + ) + }, + ) } }; diff --git a/stackslib/src/config/mod.rs b/stackslib/src/config/mod.rs index b9b9bf5204..a9a03d4861 100644 --- a/stackslib/src/config/mod.rs +++ b/stackslib/src/config/mod.rs @@ -16,7 +16,7 @@ pub mod chain_data; -use std::collections::{HashMap, HashSet}; +use std::collections::{BTreeMap, HashMap, HashSet}; use std::net::{Ipv4Addr, SocketAddr, ToSocketAddrs}; use std::path::PathBuf; use std::str::FromStr; @@ -86,18 +86,41 @@ pub const OP_TX_ANY_ESTIM_SIZE: u64 = fmax!( OP_TX_VOTE_AGG_ESTIM_SIZE ); +/// Default maximum percentage of `satoshis_per_byte` that a Bitcoin fee rate +/// may be increased to when RBFing a transaction const DEFAULT_MAX_RBF_RATE: u64 = 150; // 1.5x +/// Amount to increment the fee by, in Sats/vByte, when RBFing a Bitcoin +/// transaction const DEFAULT_RBF_FEE_RATE_INCREMENT: u64 = 5; +/// Default number of reward cycles of blocks to sync in a non-full inventory +/// sync const INV_REWARD_CYCLES_TESTNET: u64 = 6; +/// Default minimum time to wait between mining blocks in milliseconds. The +/// value must be greater than or equal to 1000 ms because if a block is mined +/// within the same second as its parent, it will be rejected by the signers. const DEFAULT_MIN_TIME_BETWEEN_BLOCKS_MS: u64 = 1_000; +/// Default time in milliseconds to pause after receiving the first threshold +/// rejection, before proposing a new block. const DEFAULT_FIRST_REJECTION_PAUSE_MS: u64 = 5_000; +/// Default time in milliseconds to pause after receiving subsequent threshold +/// rejections, before proposing a new block. const DEFAULT_SUBSEQUENT_REJECTION_PAUSE_MS: u64 = 10_000; +/// Default time in milliseconds to wait for a Nakamoto block after seeing a +/// burnchain block before submitting a block commit. 
const DEFAULT_BLOCK_COMMIT_DELAY_MS: u64 = 20_000; +/// Default percentage of the remaining tenure cost limit to consume each block const DEFAULT_TENURE_COST_LIMIT_PER_BLOCK_PERCENTAGE: u8 = 25; +/// Default number of seconds to wait in-between polling the sortition DB to +/// see if we need to extend the ongoing tenure (e.g. because the current +/// sortition is empty or invalid). const DEFAULT_TENURE_EXTEND_POLL_SECS: u64 = 1; - -// This should be greater than the signers' timeout. This is used for issuing fallback tenure extends -const DEFAULT_TENURE_TIMEOUT_SECS: u64 = 420; +/// Default duration to wait before attempting to issue a tenure extend. +/// This should be greater than the signers' timeout. This is used for issuing +/// fallback tenure extends +const DEFAULT_TENURE_TIMEOUT_SECS: u64 = 180; +/// Default percentage of block budget that must be used before attempting a +/// time-based tenure extend +const DEFAULT_TENURE_EXTEND_COST_THRESHOLD: u64 = 50; static HELIUM_DEFAULT_CONNECTION_OPTIONS: LazyLock<ConnectionOptions> = LazyLock::new(|| ConnectionOptions { @@ -1191,9 +1214,13 @@ pub struct BurnchainConfig { pub process_exit_at_block_height: Option<u64>, pub poll_time_secs: u64, pub satoshis_per_byte: u64, + /// Maximum percentage of `satoshis_per_byte` that a Bitcoin fee rate may + /// be increased to when RBFing a transaction pub max_rbf: u64, pub leader_key_tx_estimated_size: u64, pub block_commit_tx_estimated_size: u64, + /// Amount to increment the fee by, in Sats/vByte, when RBFing a Bitcoin + /// transaction pub rbf_fee_increment: u64, pub first_burn_block_height: Option<u64>, pub first_burn_block_timestamp: Option<u32>, @@ -2155,6 +2182,10 @@ pub struct MinerConfig { pub tenure_extend_poll_secs: Duration, /// Duration to wait before attempting to issue a tenure extend pub tenure_timeout: Duration, + /// Percentage of block budget that must be used before attempting a time-based tenure extend + pub tenure_extend_cost_threshold: u64, + /// Defines the timeouts to apply while waiting for signer responses, based on the number of rejections received + pub block_rejection_timeout_steps: HashMap<u32, Duration>, } impl Default for MinerConfig { @@ -2193,6 +2224,16 @@ impl Default for MinerConfig { ), tenure_extend_poll_secs: Duration::from_secs(DEFAULT_TENURE_EXTEND_POLL_SECS), tenure_timeout: Duration::from_secs(DEFAULT_TENURE_TIMEOUT_SECS), + tenure_extend_cost_threshold: DEFAULT_TENURE_EXTEND_COST_THRESHOLD, + + block_rejection_timeout_steps: { + let mut rejections_timeouts_default_map = HashMap::<u32, Duration>::new(); + rejections_timeouts_default_map.insert(0, Duration::from_secs(600)); + rejections_timeouts_default_map.insert(10, Duration::from_secs(300)); + rejections_timeouts_default_map.insert(20, Duration::from_secs(150)); + rejections_timeouts_default_map.insert(30, Duration::from_secs(0)); + rejections_timeouts_default_map + }, } } } @@ -2589,6 +2630,8 @@ pub struct MinerConfigFile { pub tenure_cost_limit_per_block_percentage: Option<u8>, pub tenure_extend_poll_secs: Option<u64>, pub tenure_timeout_secs: Option<u64>, + pub tenure_extend_cost_threshold: Option<u64>, + pub block_rejection_timeout_steps: Option<BTreeMap<String, u64>>, } impl MinerConfigFile { @@ -2731,6 +2774,25 @@ impl MinerConfigFile { tenure_cost_limit_per_block_percentage, tenure_extend_poll_secs: self.tenure_extend_poll_secs.map(Duration::from_secs).unwrap_or(miner_default_config.tenure_extend_poll_secs), tenure_timeout: self.tenure_timeout_secs.map(Duration::from_secs).unwrap_or(miner_default_config.tenure_timeout), + tenure_extend_cost_threshold: 
self.tenure_extend_cost_threshold.unwrap_or(miner_default_config.tenure_extend_cost_threshold), + + block_rejection_timeout_steps: { + if let Some(block_rejection_timeout_items) = self.block_rejection_timeout_steps { + let mut rejection_timeout_durations = HashMap::<u32, Duration>::new(); + for (slice, seconds) in block_rejection_timeout_items.iter() { + match slice.parse::<u32>() { + Ok(slice_slot) => rejection_timeout_durations.insert(slice_slot, Duration::from_secs(*seconds)), + Err(e) => panic!("block_rejection_timeout_steps keys must be unsigned integers: {}", e) + }; + } + if !rejection_timeout_durations.contains_key(&0) { + panic!("block_rejection_timeout_steps requires a definition for the '0' key/step"); + } + rejection_timeout_durations + } else { + miner_default_config.block_rejection_timeout_steps + } + } }) } } diff --git a/stackslib/src/core/mempool.rs b/stackslib/src/core/mempool.rs index e7ff24db78..d21f46c3c1 100644 --- a/stackslib/src/core/mempool.rs +++ b/stackslib/src/core/mempool.rs @@ -1092,11 +1092,8 @@ impl NonceCache { }; // In-memory cache - match self.cache.get_mut(&address) { - Some(nonce) => { - *nonce = value; - } - None => (), + if let Some(nonce) = self.cache.get_mut(&address) { + *nonce = value; } success diff --git a/stackslib/src/main.rs b/stackslib/src/main.rs index 8731b78f42..6ad88d0b68 100644 --- a/stackslib/src/main.rs +++ b/stackslib/src/main.rs @@ -1522,7 +1522,7 @@ check if the associated microblocks can be downloaded while next_arrival < stacks_blocks_arrival_order.len() && known_stacks_blocks.contains(&stacks_block_id) { - if let Some(_) = stacks_blocks_available.get(&stacks_block_id) { + if stacks_blocks_available.get(&stacks_block_id).is_some() { // load up the block let stacks_block_opt = StacksChainState::load_block( &old_chainstate.blocks_path, diff --git a/stackslib/src/net/atlas/mod.rs b/stackslib/src/net/atlas/mod.rs index c382aa618d..49d1036a0b 100644 --- a/stackslib/src/net/atlas/mod.rs +++ b/stackslib/src/net/atlas/mod.rs @@ -195,45 +195,42 @@ impl AttachmentInstance { ) -> Option<AttachmentInstance> { if let Value::Tuple(ref attachment) = value { if let Ok(Value::Tuple(ref attachment_data)) = attachment.get("attachment") { - match ( + if let ( + Ok(Value::Sequence(SequenceData::Buffer(content_hash))), + Ok(Value::UInt(attachment_index)), + ) = ( attachment_data.get("hash"), attachment_data.get("attachment-index"), ) { - ( - Ok(Value::Sequence(SequenceData::Buffer(content_hash))), - Ok(Value::UInt(attachment_index)), - ) => { - let content_hash = if content_hash.data.is_empty() { - Hash160::empty() - } else { - match Hash160::from_bytes(&content_hash.data[..]) { - Some(content_hash) => content_hash, - _ => return None, - } - }; - let metadata = match attachment_data.get("metadata") { - Ok(metadata) => { - let mut serialized = vec![]; - metadata - .consensus_serialize(&mut serialized) - .expect("FATAL: invalid metadata"); - to_hex(&serialized[..]) - } - _ => String::new(), - }; - let instance = AttachmentInstance { - index_block_hash, - content_hash, - attachment_index: *attachment_index as u32, - stacks_block_height, - metadata, - contract_id: contract_id.clone(), - tx_id, - canonical_stacks_tip_height, - }; - return Some(instance); - } - _ => {} + let content_hash = if content_hash.data.is_empty() { + Hash160::empty() + } else { + match Hash160::from_bytes(&content_hash.data[..]) { + Some(content_hash) => content_hash, + _ => return None, + } + }; + let metadata = match attachment_data.get("metadata") { + Ok(metadata) => { + let mut serialized = vec![]; + metadata 
.consensus_serialize(&mut serialized) + .expect("FATAL: invalid metadata"); + to_hex(&serialized[..]) + } + _ => String::new(), + }; + let instance = AttachmentInstance { + index_block_hash, + content_hash, + attachment_index: *attachment_index as u32, + stacks_block_height, + metadata, + contract_id: contract_id.clone(), + tx_id, + canonical_stacks_tip_height, + }; + return Some(instance); } } } diff --git a/stackslib/src/net/chat.rs b/stackslib/src/net/chat.rs index c24bde9bce..2b16a4ac06 100644 --- a/stackslib/src/net/chat.rs +++ b/stackslib/src/net/chat.rs @@ -515,13 +515,10 @@ impl Neighbor { // setting BLOCKSTACK_NEIGHBOR_TEST_${PORTNUMBER} will let us select an organization // for this peer use std::env; - match env::var(format!("BLOCKSTACK_NEIGHBOR_TEST_{}", addr.port)) { - Ok(asn_str) => { - neighbor.asn = asn_str.parse().unwrap(); - neighbor.org = neighbor.asn; - test_debug!("Override {:?} to ASN/org {}", &neighbor.addr, neighbor.asn); - } - Err(_) => {} + if let Ok(asn_str) = env::var(format!("BLOCKSTACK_NEIGHBOR_TEST_{}", addr.port)) { + neighbor.asn = asn_str.parse().unwrap(); + neighbor.org = neighbor.asn; + test_debug!("Override {:?} to ASN/org {}", &neighbor.addr, neighbor.asn); }; } @@ -544,13 +541,10 @@ impl Neighbor { let asn_opt = PeerDB::asn_lookup(conn, &addr.addrbytes).map_err(net_error::DBError)?; - match asn_opt { - Some(a) => { - if a != 0 { - peer.asn = a; - } + if let Some(a) = asn_opt { + if a != 0 { + peer.asn = a; } - None => {} }; } Ok(Some(peer)) @@ -3100,11 +3094,8 @@ mod test { services: u16, ) -> (PeerDB, SortitionDB, StackerDBs, PoxId, StacksChainState) { let test_path = format!("/tmp/stacks-test-databases-{}", testname); - match fs::metadata(&test_path) { - Ok(_) => { - fs::remove_dir_all(&test_path).unwrap(); - } - Err(_) => {} + if fs::metadata(&test_path).is_ok() { + fs::remove_dir_all(&test_path).unwrap(); }; fs::create_dir_all(&test_path).unwrap(); diff --git a/stackslib/src/net/connection.rs b/stackslib/src/net/connection.rs index 09465721ba..6aa8aa8c08 100644 --- a/stackslib/src/net/connection.rs +++ b/stackslib/src/net/connection.rs @@ -379,6 +379,7 @@ pub struct ConnectionOptions { /// Units are milliseconds. A value of 0 means "never". pub log_neighbors_freq: u64, pub inv_sync_interval: u64, + // how many reward cycles of blocks to sync in a non-full inventory sync pub inv_reward_cycles: u64, pub download_interval: u64, pub pingback_timeout: u64, @@ -926,19 +927,16 @@ impl ConnectionInbox
<P: ProtocolFamily>
{ let bytes_consumed = if let Some(ref mut preamble) = preamble_opt { let (message_opt, bytes_consumed) = self.consume_payload(protocol, preamble, &buf[offset..])?; - match message_opt { - Some(message) => { - // queue up - test_debug!( - "Consumed message '{}' (request {}) in {} bytes", - message.get_message_name(), - message.request_id(), - bytes_consumed - ); - self.inbox.push_back(message); - consumed_message = true; - } - None => {} + if let Some(message) = message_opt { + // queue up + test_debug!( + "Consumed message '{}' (request {}) in {} bytes", + message.get_message_name(), + message.request_id(), + bytes_consumed + ); + self.inbox.push_back(message); + consumed_message = true; }; bytes_consumed @@ -982,14 +980,11 @@ impl ConnectionInbox
<P: ProtocolFamily>
{ if let Some(ref mut preamble) = preamble_opt { let (message_opt, _bytes_consumed) = self.consume_payload(protocol, preamble, &[])?; - match message_opt { - Some(message) => { - // queue up - test_debug!("Consumed buffered message '{}' (request {}) from {} input buffer bytes", message.get_message_name(), message.request_id(), _bytes_consumed); - self.inbox.push_back(message); - consumed_message = true; - } - None => {} + if let Some(message) = message_opt { + // queue up + test_debug!("Consumed buffered message '{}' (request {}) from {} input buffer bytes", message.get_message_name(), message.request_id(), _bytes_consumed); + self.inbox.push_back(message); + consumed_message = true; } } self.preamble = preamble_opt; diff --git a/stackslib/src/net/db.rs b/stackslib/src/net/db.rs index 3c31ce41d4..4b5bee8975 100644 --- a/stackslib/src/net/db.rs +++ b/stackslib/src/net/db.rs @@ -158,7 +158,7 @@ impl LocalPeer { "Will be authenticating p2p messages with the following"; "public key" => &Secp256k1PublicKey::from_private(&pkey).to_hex(), "services" => &to_hex(&services.to_be_bytes()), - "Stacker DBs" => stacker_dbs.iter().map(|cid| format!("{}", &cid)).collect::>().join(",") + "Stacker DBs" => stacker_dbs.iter().map(|cid| cid.to_string()).collect::>().join(",") ); LocalPeer { diff --git a/stackslib/src/net/dns.rs b/stackslib/src/net/dns.rs index d94f45d4d6..1a9e8278bd 100644 --- a/stackslib/src/net/dns.rs +++ b/stackslib/src/net/dns.rs @@ -377,13 +377,10 @@ mod test { let mut resolved_addrs = None; loop { client.try_recv().unwrap(); - match client.poll_lookup("www.google.com", 80).unwrap() { - Some(addrs) => { - test_debug!("addrs: {:?}", &addrs); - resolved_addrs = Some(addrs); - break; - } - None => {} + if let Some(addrs) = client.poll_lookup("www.google.com", 80).unwrap() { + test_debug!("addrs: {:?}", &addrs); + resolved_addrs = Some(addrs); + break; } sleep_ms(100); } @@ -423,13 +420,10 @@ mod test { if resolved_addrs.contains_key(*name) { continue; } - match client.poll_lookup(name, 80).unwrap() { - Some(addrs) => { - test_debug!("name {name} addrs: {addrs:?}"); - resolved_addrs.insert(name.to_string(), addrs); - break; - } - None => {} + if let Some(addrs) = client.poll_lookup(name, 80).unwrap() { + test_debug!("name {name} addrs: {addrs:?}"); + resolved_addrs.insert(name.to_string(), addrs); + break; } } @@ -452,13 +446,10 @@ mod test { let mut resolved_error = None; loop { client.try_recv().unwrap(); - match client.poll_lookup("asdfjkl;", 80).unwrap() { - Some(resp) => { - test_debug!("addrs: {:?}", &resp); - resolved_error = Some(resp); - break; - } - None => {} + if let Some(resp) = client.poll_lookup("asdfjkl;", 80).unwrap() { + test_debug!("addrs: {:?}", &resp); + resolved_error = Some(resp); + break; } sleep_ms(100); } diff --git a/stackslib/src/net/download/epoch2x.rs b/stackslib/src/net/download/epoch2x.rs index ea9076ccd7..f832457259 100644 --- a/stackslib/src/net/download/epoch2x.rs +++ b/stackslib/src/net/download/epoch2x.rs @@ -1045,9 +1045,8 @@ impl PeerNetwork { /// Pass a hint to the downloader to re-scan pub fn hint_download_rescan(&mut self, target_height: u64, ibd: bool) { - match self.block_downloader { - Some(ref mut dl) => dl.hint_download_rescan(target_height, ibd), - None => {} + if let Some(ref mut dl) = self.block_downloader { + dl.hint_download_rescan(target_height, ibd) } } @@ -1978,11 +1977,10 @@ impl PeerNetwork { for sortition_height in priority.into_iter() { match downloader.blocks_to_try.get_mut(&sortition_height) { Some(ref mut keys) => { - match 
PeerNetwork::begin_request(network, &downloader.dns_lookups, keys) { - Some((key, handle)) => { - requests.insert(key.clone(), handle); - } - None => {} + if let Some((key, handle)) = + PeerNetwork::begin_request(network, &downloader.dns_lookups, keys) + { + requests.insert(key.clone(), handle); } } None => { @@ -2016,11 +2014,10 @@ impl PeerNetwork { for sortition_height in priority.into_iter() { match downloader.microblocks_to_try.get_mut(&sortition_height) { Some(ref mut keys) => { - match PeerNetwork::begin_request(network, &downloader.dns_lookups, keys) { - Some((key, handle)) => { - requests.insert(key.clone(), handle); - } - None => {} + if let Some((key, handle)) = + PeerNetwork::begin_request(network, &downloader.dns_lookups, keys) + { + requests.insert(key.clone(), handle); } } None => { @@ -2480,9 +2477,8 @@ impl PeerNetwork { if done { // reset state if we're done - match self.block_downloader { - Some(ref mut downloader) => downloader.reset(), - None => {} + if let Some(ref mut downloader) = self.block_downloader { + downloader.reset() } } diff --git a/stackslib/src/net/http/request.rs b/stackslib/src/net/http/request.rs index dd5ad7e046..8ccb214146 100644 --- a/stackslib/src/net/http/request.rs +++ b/stackslib/src/net/http/request.rs @@ -273,29 +273,23 @@ impl StacksMessageCodec for HttpRequestPreamble { .map_err(CodecError::WriteError)?; // content-type - match self.content_type { - Some(ref c) => { - fd.write_all("Content-Type: ".as_bytes()) - .map_err(CodecError::WriteError)?; - fd.write_all(c.to_string().as_str().as_bytes()) - .map_err(CodecError::WriteError)?; - fd.write_all("\r\n".as_bytes()) - .map_err(CodecError::WriteError)?; - } - None => {} + if let Some(ref c) = self.content_type { + fd.write_all("Content-Type: ".as_bytes()) + .map_err(CodecError::WriteError)?; + fd.write_all(c.to_string().as_str().as_bytes()) + .map_err(CodecError::WriteError)?; + fd.write_all("\r\n".as_bytes()) + .map_err(CodecError::WriteError)?; } // content-length - match self.content_length { - Some(l) => { - fd.write_all("Content-Length: ".as_bytes()) - .map_err(CodecError::WriteError)?; - fd.write_all(format!("{}", l).as_bytes()) - .map_err(CodecError::WriteError)?; - fd.write_all("\r\n".as_bytes()) - .map_err(CodecError::WriteError)?; - } - None => {} + if let Some(l) = self.content_length { + fd.write_all("Content-Length: ".as_bytes()) + .map_err(CodecError::WriteError)?; + fd.write_all(format!("{}", l).as_bytes()) + .map_err(CodecError::WriteError)?; + fd.write_all("\r\n".as_bytes()) + .map_err(CodecError::WriteError)?; } // keep-alive diff --git a/stackslib/src/net/httpcore.rs b/stackslib/src/net/httpcore.rs index 2a47be3547..a7e96a1912 100644 --- a/stackslib/src/net/httpcore.rs +++ b/stackslib/src/net/httpcore.rs @@ -1232,25 +1232,22 @@ impl StacksHttp { /// This method will set up this state machine to consume the message associated with this /// premable, if the response is chunked. 
fn set_preamble(&mut self, preamble: &StacksHttpPreamble) -> Result<(), NetError> { - match preamble { - StacksHttpPreamble::Response(ref http_response_preamble) => { - // we can only receive a response if we're expecting it - if self.request_handler_index.is_none() && !self.allow_arbitrary_response { - return Err(NetError::DeserializeError( - "Unexpected HTTP response: no active request handler".to_string(), - )); + if let StacksHttpPreamble::Response(ref http_response_preamble) = preamble { + // we can only receive a response if we're expecting it + if self.request_handler_index.is_none() && !self.allow_arbitrary_response { + return Err(NetError::DeserializeError( + "Unexpected HTTP response: no active request handler".to_string(), + )); + } + if http_response_preamble.is_chunked() { + // we can only receive one response at a time + if self.reply.is_some() { + test_debug!("Have pending reply already"); + return Err(NetError::InProgress); } - if http_response_preamble.is_chunked() { - // we can only receive one response at a time - if self.reply.is_some() { - test_debug!("Have pending reply already"); - return Err(NetError::InProgress); - } - self.set_pending(http_response_preamble); - } + self.set_pending(http_response_preamble); } - _ => {} } Ok(()) } diff --git a/stackslib/src/net/inv/epoch2x.rs b/stackslib/src/net/inv/epoch2x.rs index 758ffe3758..430189c41e 100644 --- a/stackslib/src/net/inv/epoch2x.rs +++ b/stackslib/src/net/inv/epoch2x.rs @@ -1534,15 +1534,12 @@ impl PeerNetwork { } // does the peer agree with our PoX view up to this reward cycle? - match stats.inv.pox_inv_cmp(&self.pox_id) { - Some((disagreed, _, _)) => { - if disagreed < target_block_reward_cycle { - // can't proceed - debug!("{:?}: remote neighbor {:?} disagrees with our PoX inventory at reward cycle {} (asked for {})", &self.local_peer, nk, disagreed, target_block_reward_cycle); - return Ok(0); - } + if let Some((disagreed, _, _)) = stats.inv.pox_inv_cmp(&self.pox_id) { + if disagreed < target_block_reward_cycle { + // can't proceed + debug!("{:?}: remote neighbor {:?} disagrees with our PoX inventory at reward cycle {} (asked for {})", &self.local_peer, nk, disagreed, target_block_reward_cycle); + return Ok(0); } - None => {} } let target_block_height = self @@ -2517,13 +2514,10 @@ impl PeerNetwork { let mut cur_neighbors = HashSet::new(); for (nk, event_id) in self.events.iter() { // only outbound authenticated peers - match self.peers.get(event_id) { - Some(convo) => { - if convo.is_outbound() && convo.is_authenticated() { - cur_neighbors.insert(nk.clone()); - } + if let Some(convo) = self.peers.get(event_id) { + if convo.is_outbound() && convo.is_authenticated() { + cur_neighbors.insert(nk.clone()); } - None => {} } } @@ -2537,17 +2531,14 @@ impl PeerNetwork { /// Set a hint that we learned something new, and need to sync invs again pub fn hint_sync_invs(&mut self, target_height: u64) { - match self.inv_state { - Some(ref mut inv_state) => { - debug!( - "Awaken inv sync to re-scan peer block inventories at height {}", - target_height - ); - inv_state.hint_learned_data = true; - inv_state.hint_do_rescan = true; - inv_state.hint_learned_data_height = target_height; - } - None => {} + if let Some(ref mut inv_state) = self.inv_state { + debug!( + "Awaken inv sync to re-scan peer block inventories at height {}", + target_height + ); + inv_state.hint_learned_data = true; + inv_state.hint_do_rescan = true; + inv_state.hint_learned_data_height = target_height; } } diff --git a/stackslib/src/net/mod.rs 
b/stackslib/src/net/mod.rs index 29b0e17bdc..6e6870cbfb 100644 --- a/stackslib/src/net/mod.rs +++ b/stackslib/src/net/mod.rs @@ -2376,11 +2376,8 @@ pub mod test { if self.closed { return Ok(0); } - match self.read_error { - Some(ref e) => { - return Err(io::Error::from((*e).clone())); - } - None => {} + if let Some(ref e) = self.read_error { + return Err(io::Error::from((*e).clone())); } let sz = self.c.read(buf)?; @@ -2403,11 +2400,8 @@ pub mod test { if self.closed { return Err(io::Error::from(ErrorKind::Other)); // EBADF } - match self.write_error { - Some(ref e) => { - return Err(io::Error::from((*e).clone())); - } - None => {} + if let Some(ref e) = self.write_error { + return Err(io::Error::from((*e).clone())); } self.c.write(buf) } @@ -2799,11 +2793,8 @@ pub mod test { pub fn make_test_path(config: &TestPeerConfig) -> String { let test_path = TestPeer::test_path(config); - match fs::metadata(&test_path) { - Ok(_) => { - fs::remove_dir_all(&test_path).unwrap(); - } - Err(_) => {} + if fs::metadata(&test_path).is_ok() { + fs::remove_dir_all(&test_path).unwrap(); }; fs::create_dir_all(&test_path).unwrap(); @@ -3558,11 +3549,8 @@ pub mod test { ch: &ConsensusHash, ) { for op in blockstack_ops.iter_mut() { - match op { - BlockstackOperationType::LeaderKeyRegister(ref mut data) => { - data.consensus_hash = (*ch).clone(); - } - _ => {} + if let BlockstackOperationType::LeaderKeyRegister(ref mut data) = op { + data.consensus_hash = (*ch).clone(); } } } diff --git a/stackslib/src/net/neighbors/mod.rs b/stackslib/src/net/neighbors/mod.rs index cc3fd73db8..f0d3cf18b7 100644 --- a/stackslib/src/net/neighbors/mod.rs +++ b/stackslib/src/net/neighbors/mod.rs @@ -388,11 +388,8 @@ impl PeerNetwork { inbound.join(", ") ); - match PeerDB::get_frontier_size(self.peerdb.conn()) { - Ok(count) => { - debug!("{:?}: Frontier table size: {}", &self.local_peer, count); - } - Err(_) => {} + if let Ok(count) = PeerDB::get_frontier_size(self.peerdb.conn()) { + debug!("{:?}: Frontier table size: {}", &self.local_peer, count); }; debug!("{:?}: Walk finished ===================", &self.local_peer); } diff --git a/stackslib/src/net/p2p.rs b/stackslib/src/net/p2p.rs index 310340de93..fd8561326a 100644 --- a/stackslib/src/net/p2p.rs +++ b/stackslib/src/net/p2p.rs @@ -1145,13 +1145,10 @@ impl PeerNetwork { ) -> u64 { let mut ret = 0; for (_, socket) in sockets.iter() { - match socket.peer_addr() { - Ok(addr) => { - if addr.ip() == ipaddr.ip() { - ret += 1; - } + if let Ok(addr) = socket.peer_addr() { + if addr.ip() == ipaddr.ip() { + ret += 1; } - Err(_) => {} }; } ret @@ -1378,12 +1375,9 @@ impl PeerNetwork { NetworkRequest::Ban(neighbor_keys) => { for neighbor_key in neighbor_keys.iter() { info!("Request to ban {:?}", neighbor_key); - match self.events.get(neighbor_key) { - Some(event_id) => { - debug!("Will ban {:?} (event {})", neighbor_key, event_id); - self.bans.insert(*event_id); - } - None => {} + if let Some(event_id) = self.events.get(neighbor_key) { + debug!("Will ban {:?} (event {})", neighbor_key, event_id); + self.bans.insert(*event_id); } } Ok(()) @@ -1466,28 +1460,25 @@ impl PeerNetwork { // receive all in-bound requests for i in 0..self.handles.len() { - match self.handles.get(i) { - Some(handle) => { - loop { - // drain all inbound requests - let inbound_request_res = handle.chan_in.try_recv(); - match inbound_request_res { - Ok(inbound_request) => { - messages.push((i, inbound_request)); - } - Err(TryRecvError::Empty) => { - // nothing to do - break; - } - Err(TryRecvError::Disconnected) => { - // 
dead; remove - to_remove.push(i); - break; - } + if let Some(handle) = self.handles.get(i) { + loop { + // drain all inbound requests + let inbound_request_res = handle.chan_in.try_recv(); + match inbound_request_res { + Ok(inbound_request) => { + messages.push((i, inbound_request)); + } + Err(TryRecvError::Empty) => { + // nothing to do + break; + } + Err(TryRecvError::Disconnected) => { + // dead; remove + to_remove.push(i); + break; + } } } - None => {} } } @@ -1885,11 +1876,8 @@ impl PeerNetwork { /// Deregister a socket from our p2p network instance. fn deregister_socket(&mut self, event_id: usize, socket: mio_net::TcpStream) { - match self.network { - Some(ref mut network) => { - let _ = network.deregister(event_id, &socket); - } - None => {} + if let Some(ref mut network) = self.network { + let _ = network.deregister(event_id, &socket); } } @@ -1969,11 +1957,8 @@ impl PeerNetwork { /// Deregister and ban a neighbor pub fn deregister_and_ban_neighbor(&mut self, neighbor: &NeighborKey) { debug!("Disconnect from and ban {:?}", neighbor); - match self.events.get(neighbor) { - Some(event_id) => { - self.bans.insert(*event_id); - } - None => {} + if let Some(event_id) = self.events.get(neighbor) { + self.bans.insert(*event_id); } self.relayer_stats.process_neighbor_ban(neighbor); diff --git a/stackslib/src/net/prune.rs b/stackslib/src/net/prune.rs index 1ad6b4ffc8..ac9cb361e5 100644 --- a/stackslib/src/net/prune.rs +++ b/stackslib/src/net/prune.rs @@ -322,18 +322,15 @@ impl PeerNetwork { if preserve.contains(event_id) { continue; } - match self.peers.get(event_id) { - Some(convo) => { - if !convo.stats.outbound { - let stats = convo.stats.clone(); - if let Some(entry) = ip_neighbor.get_mut(&nk.addrbytes) { - entry.push((*event_id, nk.clone(), stats)); - } else { - ip_neighbor.insert(nk.addrbytes, vec![(*event_id, nk.clone(), stats)]); - } + if let Some(convo) = self.peers.get(event_id) { + if !convo.stats.outbound { + let stats = convo.stats.clone(); + if let Some(entry) = ip_neighbor.get_mut(&nk.addrbytes) { + entry.push((*event_id, nk.clone(), stats)); + } else { + ip_neighbor.insert(nk.addrbytes, vec![(*event_id, nk.clone(), stats)]); } } - None => {} } } @@ -378,15 +375,12 @@ impl PeerNetwork { let mut outbound: Vec<String> = vec![]; for (nk, event_id) in self.events.iter() { - match self.peers.get(event_id) { - Some(convo) => { - if convo.stats.outbound { - outbound.push(format!("{:?}", &nk)); - } else { - inbound.push(format!("{:?}", &nk)); - } + if let Some(convo) = self.peers.get(event_id) { + if convo.stats.outbound { + outbound.push(format!("{:?}", &nk)); + } else { + inbound.push(format!("{:?}", &nk)); } - None => {} } } (inbound, outbound) @@ -464,11 +458,8 @@ impl PeerNetwork { inbound.join(", ") ); - match PeerDB::get_frontier_size(self.peerdb.conn()) { - Ok(count) => { - debug!("{:?}: Frontier size: {}", &self.local_peer, count); - } - Err(_) => {} + if let Ok(count) = PeerDB::get_frontier_size(self.peerdb.conn()) { + debug!("{:?}: Frontier size: {}", &self.local_peer, count); }; } } diff --git a/stackslib/src/net/relay.rs b/stackslib/src/net/relay.rs index 7f3ba662fb..a3f0117c4a 100644 --- a/stackslib/src/net/relay.rs +++ b/stackslib/src/net/relay.rs @@ -1821,52 +1821,49 @@ impl Relayer { &tx.txid(), &ast_rules ); - match tx.payload { - TransactionPayload::SmartContract(ref smart_contract, ref clarity_version_opt) => { - let clarity_version = - clarity_version_opt.unwrap_or(ClarityVersion::default_for_epoch(epoch_id)); - - if ast_rules == ASTRules::PrecheckSize { - let origin = 
tx.get_origin(); - let issuer_principal = { - let addr = if mainnet { - origin.address_mainnet() - } else { - origin.address_testnet() - }; - addr.to_account_principal() - }; - let issuer_principal = if let PrincipalData::Standard(data) = issuer_principal { - data + if let TransactionPayload::SmartContract(ref smart_contract, ref clarity_version_opt) = + tx.payload + { + let clarity_version = + clarity_version_opt.unwrap_or(ClarityVersion::default_for_epoch(epoch_id)); + + if ast_rules == ASTRules::PrecheckSize { + let origin = tx.get_origin(); + let issuer_principal = { + let addr = if mainnet { + origin.address_mainnet() } else { - // not possible - panic!("Transaction had a contract principal origin"); + origin.address_testnet() }; + addr.to_account_principal() + }; + let issuer_principal = if let PrincipalData::Standard(data) = issuer_principal { + data + } else { + // not possible + panic!("Transaction had a contract principal origin"); + }; - let contract_id = QualifiedContractIdentifier::new( - issuer_principal, - smart_contract.name.clone(), - ); - let contract_code_str = smart_contract.code_body.to_string(); - - // make sure that the AST isn't unreasonably big - let ast_res = - ast_check_size(&contract_id, &contract_code_str, clarity_version, epoch_id); - match ast_res { - Ok(_) => {} - Err(parse_error) => match parse_error.err { - ParseErrors::ExpressionStackDepthTooDeep - | ParseErrors::VaryExpressionStackDepthTooDeep => { - // don't include this block - info!("Transaction {} is problematic and will not be included, relayed, or built upon", &tx.txid()); - return Err(Error::ClarityError(parse_error.into())); - } - _ => {} - }, - } + let contract_id = + QualifiedContractIdentifier::new(issuer_principal, smart_contract.name.clone()); + let contract_code_str = smart_contract.code_body.to_string(); + + // make sure that the AST isn't unreasonably big + let ast_res = + ast_check_size(&contract_id, &contract_code_str, clarity_version, epoch_id); + match ast_res { + Ok(_) => {} + Err(parse_error) => match parse_error.err { + ParseErrors::ExpressionStackDepthTooDeep + | ParseErrors::VaryExpressionStackDepthTooDeep => { + // don't include this block + info!("Transaction {} is problematic and will not be included, relayed, or built upon", &tx.txid()); + return Err(Error::ClarityError(parse_error.into())); + } + _ => {} + }, } } - _ => {} } Ok(()) } diff --git a/stackslib/src/net/server.rs b/stackslib/src/net/server.rs index 7370f9cdf0..1df56c299b 100644 --- a/stackslib/src/net/server.rs +++ b/stackslib/src/net/server.rs @@ -429,56 +429,52 @@ impl HttpPeer { // get incoming bytes and update the state of this conversation. let mut convo_dead = false; let recv_res = convo.recv(client_sock); - match recv_res { - Err(e) => { - match e { - net_error::PermanentlyDrained => { - // socket got closed, but we might still have pending unsolicited messages - debug!( - "Remote HTTP peer disconnected event {} (socket {:?})", - event_id, &client_sock - ); - convo_dead = true; - } - net_error::InvalidMessage => { - // got sent bad data. If this was an inbound conversation, send it a HTTP - // 400 and close the socket. 
- debug!("Got a bad HTTP message on socket {:?}", &client_sock); - match convo.reply_error(StacksHttpResponse::new_empty_error( - &HttpBadRequest::new( - "Received an HTTP message that the node could not decode" - .to_string(), - ), - )) { - Ok(_) => { - // prime the socket - if let Err(e) = HttpPeer::saturate_http_socket(client_sock, convo) { - debug!( - "Failed to flush HTTP 400 to socket {:?}: {:?}", - &client_sock, &e - ); - // convo_dead = true; - } - } - Err(e) => { + if let Err(e) = recv_res { + match e { + net_error::PermanentlyDrained => { + // socket got closed, but we might still have pending unsolicited messages + debug!( + "Remote HTTP peer disconnected event {} (socket {:?})", + event_id, &client_sock + ); + convo_dead = true; + } + net_error::InvalidMessage => { + // got sent bad data. If this was an inbound conversation, send it a HTTP + // 400 and close the socket. + debug!("Got a bad HTTP message on socket {:?}", &client_sock); + match convo.reply_error(StacksHttpResponse::new_empty_error( + &HttpBadRequest::new( + "Received an HTTP message that the node could not decode".to_string(), + ), + )) { + Ok(_) => { + // prime the socket + if let Err(e) = HttpPeer::saturate_http_socket(client_sock, convo) { debug!( - "Failed to reply HTTP 400 to socket {:?}: {:?}", + "Failed to flush HTTP 400 to socket {:?}: {:?}", &client_sock, &e ); - convo_dead = true; + // convo_dead = true; } } + Err(e) => { + debug!( + "Failed to reply HTTP 400 to socket {:?}: {:?}", + &client_sock, &e + ); + convo_dead = true; + } } - _ => { - debug!( - "Failed to receive HTTP data on event {} (socket {:?}): {:?}", - event_id, &client_sock, &e - ); - convo_dead = true; - } + } + _ => { + debug!( + "Failed to receive HTTP data on event {} (socket {:?}): {:?}", + event_id, &client_sock, &e + ); + convo_dead = true; } } - Ok(_) => {} } // react to inbound messages -- do we need to send something out, or fulfill requests @@ -730,11 +726,8 @@ mod test { peer.step().unwrap(); // asked to yield? 
- match http_rx.try_recv() { - Ok(_) => { - break; - } - Err(_) => {} + if http_rx.try_recv().is_ok() { + break; } } diff --git a/stackslib/src/net/tests/convergence.rs b/stackslib/src/net/tests/convergence.rs index a6b4a83df5..ae81703c53 100644 --- a/stackslib/src/net/tests/convergence.rs +++ b/stackslib/src/net/tests/convergence.rs @@ -218,7 +218,7 @@ fn test_walk_ring_15_org_biased() { let peers = test_walk_ring(&mut peer_configs); for i in 1..peer_count { - match PeerDB::get_peer( + if let Some(p) = PeerDB::get_peer( peers[i].network.peerdb.conn(), peer_0.addr.network_id, &peer_0.addr.addrbytes, @@ -226,11 +226,8 @@ fn test_walk_ring_15_org_biased() { ) .unwrap() { - Some(p) => { - assert_eq!(p.asn, 1); - assert_eq!(p.org, 1); - } - None => {} + assert_eq!(p.asn, 1); + assert_eq!(p.org, 1); } } @@ -398,7 +395,7 @@ fn test_walk_line_15_org_biased() { let peers = test_walk_line(&mut peer_configs); for i in 1..peer_count { - match PeerDB::get_peer( + if let Some(p) = PeerDB::get_peer( peers[i].network.peerdb.conn(), peer_0.addr.network_id, &peer_0.addr.addrbytes, @@ -406,11 +403,8 @@ fn test_walk_line_15_org_biased() { ) .unwrap() { - Some(p) => { - assert_eq!(p.asn, 1); - assert_eq!(p.org, 1); - } - None => {} + assert_eq!(p.asn, 1); + assert_eq!(p.org, 1); } } @@ -634,7 +628,7 @@ fn test_walk_star_15_org_biased() { let peers = test_walk_star(&mut peer_configs); for i in 1..peer_count { - match PeerDB::get_peer( + if let Some(p) = PeerDB::get_peer( peers[i].network.peerdb.conn(), peer_0.addr.network_id, &peer_0.addr.addrbytes, @@ -642,11 +636,8 @@ fn test_walk_star_15_org_biased() { ) .unwrap() { - Some(p) => { - assert_eq!(p.asn, 1); - assert_eq!(p.org, 1); - } - None => {} + assert_eq!(p.asn, 1); + assert_eq!(p.org, 1); } } @@ -849,14 +840,11 @@ fn dump_peers(peers: &[TestPeer]) { let stats_opt = peers[i] .network .get_neighbor_stats(&peers[j].to_neighbor().addr); - match stats_opt { - Some(stats) => { - neighbor_index.push(j); - if stats.outbound { - outbound_neighbor_index.push(j); - } + if let Some(stats) = stats_opt { + neighbor_index.push(j); + if stats.outbound { + outbound_neighbor_index.push(j); } - None => {} } } @@ -879,16 +867,13 @@ fn dump_peer_histograms(peers: &[TestPeer]) { let stats_opt = peers[i] .network .get_neighbor_stats(&peers[j].to_neighbor().addr); - match stats_opt { - Some(stats) => { - neighbor_index.push(j); - if stats.outbound { - outbound_neighbor_index.push(j); - } else { - inbound_neighbor_index.push(j); - } + if let Some(stats) = stats_opt { + neighbor_index.push(j); + if stats.outbound { + outbound_neighbor_index.push(j); + } else { + inbound_neighbor_index.push(j); } - None => {} } } for inbound in inbound_neighbor_index.iter() { @@ -998,32 +983,26 @@ fn run_topology_test_ex( debug!("Step peer {:?}", &nk); // allowed peers are still connected - match initial_allowed.get(&nk) { - Some(peer_list) => { - for pnk in peer_list.iter() { - if !peers[i].network.events.contains_key(&pnk.clone()) { - error!( - "{:?}: Perma-allowed peer {:?} not connected anymore", - &nk, &pnk - ); - assert!(false); - } + if let Some(peer_list) = initial_allowed.get(&nk) { + for pnk in peer_list.iter() { + if !peers[i].network.events.contains_key(&pnk.clone()) { + error!( + "{:?}: Perma-allowed peer {:?} not connected anymore", + &nk, &pnk + ); + assert!(false); } } - None => {} }; // denied peers are never connected - match initial_denied.get(&nk) { - Some(peer_list) => { - for pnk in peer_list.iter() { - if peers[i].network.events.contains_key(&pnk.clone()) { - error!("{:?}: 
Perma-denied peer {:?} connected", &nk, &pnk); - assert!(false); - } + if let Some(peer_list) = initial_denied.get(&nk) { + for pnk in peer_list.iter() { + if peers[i].network.events.contains_key(&pnk.clone()) { + error!("{:?}: Perma-denied peer {:?} connected", &nk, &pnk); + assert!(false); } } - None => {} }; // all ports are unique in the p2p socket table diff --git a/stackslib/src/net/tests/download/epoch2x.rs b/stackslib/src/net/tests/download/epoch2x.rs index 1ac85a3219..50ec1b1c03 100644 --- a/stackslib/src/net/tests/download/epoch2x.rs +++ b/stackslib/src/net/tests/download/epoch2x.rs @@ -171,20 +171,14 @@ fn test_get_block_availability() { }; // nothing should break - match peer_1.network.inv_state { - Some(ref inv) => { - assert_eq!(inv.get_broken_peers().len(), 0); - assert_eq!(inv.get_diverged_peers().len(), 0); - } - None => {} + if let Some(ref inv) = peer_1.network.inv_state { + assert_eq!(inv.get_broken_peers().len(), 0); + assert_eq!(inv.get_diverged_peers().len(), 0); } - match peer_2.network.inv_state { - Some(ref inv) => { - assert_eq!(inv.get_broken_peers().len(), 0); - assert_eq!(inv.get_diverged_peers().len(), 0); - } - None => {} + if let Some(ref inv) = peer_2.network.inv_state { + assert_eq!(inv.get_broken_peers().len(), 0); + assert_eq!(inv.get_diverged_peers().len(), 0); } round += 1; @@ -558,12 +552,9 @@ pub fn test_get_blocks_and_microblocks_2_peers_download_plain() { |peer| { // check peer health // nothing should break - match peer.network.block_downloader { - Some(ref dl) => { - assert_eq!(dl.broken_peers.len(), 0); - assert_eq!(dl.dead_peers.len(), 0); - } - None => {} + if let Some(ref dl) = peer.network.block_downloader { + assert_eq!(dl.broken_peers.len(), 0); + assert_eq!(dl.dead_peers.len(), 0); } // no block advertisements (should be disabled) @@ -839,12 +830,9 @@ pub fn test_get_blocks_and_microblocks_2_peers_download_plain_100_blocks() { |peer| { // check peer health // nothing should break - match peer.network.block_downloader { - Some(ref dl) => { - assert_eq!(dl.broken_peers.len(), 0); - assert_eq!(dl.dead_peers.len(), 0); - } - None => {} + if let Some(ref dl) = peer.network.block_downloader { + assert_eq!(dl.broken_peers.len(), 0); + assert_eq!(dl.dead_peers.len(), 0); } // no block advertisements (should be disabled) @@ -930,12 +918,9 @@ pub fn test_get_blocks_and_microblocks_5_peers_star() { |peer| { // check peer health // nothing should break - match peer.network.block_downloader { - Some(ref dl) => { - assert_eq!(dl.broken_peers.len(), 0); - assert_eq!(dl.dead_peers.len(), 0); - } - None => {} + if let Some(ref dl) = peer.network.block_downloader { + assert_eq!(dl.broken_peers.len(), 0); + assert_eq!(dl.dead_peers.len(), 0); } true }, @@ -1004,12 +989,9 @@ pub fn test_get_blocks_and_microblocks_5_peers_line() { |peer| { // check peer health // nothing should break - match peer.network.block_downloader { - Some(ref dl) => { - assert_eq!(dl.broken_peers.len(), 0); - assert_eq!(dl.dead_peers.len(), 0); - } - None => {} + if let Some(ref dl) = peer.network.block_downloader { + assert_eq!(dl.broken_peers.len(), 0); + assert_eq!(dl.dead_peers.len(), 0); } true }, @@ -1086,12 +1068,9 @@ pub fn test_get_blocks_and_microblocks_overwhelmed_connections() { |peer| { // check peer health // nothing should break - match peer.network.block_downloader { - Some(ref dl) => { - assert_eq!(dl.broken_peers.len(), 0); - assert_eq!(dl.dead_peers.len(), 0); - } - None => {} + if let Some(ref dl) = peer.network.block_downloader { + 
assert_eq!(dl.broken_peers.len(), 0); + assert_eq!(dl.dead_peers.len(), 0); } true }, @@ -1249,11 +1228,8 @@ pub fn test_get_blocks_and_microblocks_ban_url() { |_| {}, |peer| { let mut blocked = 0; - match peer.network.block_downloader { - Some(ref dl) => { - blocked = dl.blocked_urls.len(); - } - None => {} + if let Some(ref dl) = peer.network.block_downloader { + blocked = dl.blocked_urls.len(); } if blocked >= 1 { // NOTE: this is the success criterion @@ -1470,12 +1446,9 @@ pub fn test_get_blocks_and_microblocks_2_peers_download_multiple_microblock_desc |peer| { // check peer health // nothing should break - match peer.network.block_downloader { - Some(ref dl) => { - assert_eq!(dl.broken_peers.len(), 0); - assert_eq!(dl.dead_peers.len(), 0); - } - None => {} + if let Some(ref dl) = peer.network.block_downloader { + assert_eq!(dl.broken_peers.len(), 0); + assert_eq!(dl.dead_peers.len(), 0); } // no block advertisements (should be disabled) diff --git a/stackslib/src/net/tests/inv/epoch2x.rs b/stackslib/src/net/tests/inv/epoch2x.rs index d6d7d9557a..949c9ad383 100644 --- a/stackslib/src/net/tests/inv/epoch2x.rs +++ b/stackslib/src/net/tests/inv/epoch2x.rs @@ -1390,22 +1390,16 @@ fn test_sync_inv_2_peers_plain() { }; // nothing should break - match peer_1.network.inv_state { - Some(ref inv) => { - assert_eq!(inv.get_broken_peers().len(), 0); - assert_eq!(inv.get_dead_peers().len(), 0); - assert_eq!(inv.get_diverged_peers().len(), 0); - } - None => {} + if let Some(ref inv) = peer_1.network.inv_state { + assert_eq!(inv.get_broken_peers().len(), 0); + assert_eq!(inv.get_dead_peers().len(), 0); + assert_eq!(inv.get_diverged_peers().len(), 0); } - match peer_2.network.inv_state { - Some(ref inv) => { - assert_eq!(inv.get_broken_peers().len(), 0); - assert_eq!(inv.get_dead_peers().len(), 0); - assert_eq!(inv.get_diverged_peers().len(), 0); - } - None => {} + if let Some(ref inv) = peer_2.network.inv_state { + assert_eq!(inv.get_broken_peers().len(), 0); + assert_eq!(inv.get_dead_peers().len(), 0); + assert_eq!(inv.get_diverged_peers().len(), 0); } round += 1; @@ -1553,46 +1547,38 @@ fn test_sync_inv_2_peers_stale() { None => 0, }; - match peer_1.network.inv_state { - Some(ref inv) => { - info!("Peer 1 stats: {:?}", &inv.block_stats); - assert_eq!(inv.get_broken_peers().len(), 0); - assert_eq!(inv.get_dead_peers().len(), 0); - assert_eq!(inv.get_diverged_peers().len(), 0); - - if let Some(peer_2_inv) = inv.block_stats.get(&peer_2.to_neighbor().addr) { - if peer_2_inv.inv.num_sortitions - == first_stacks_block_height - - peer_1.config.burnchain.first_block_height - { - for i in 0..first_stacks_block_height { - assert!(!peer_2_inv.inv.has_ith_block(i)); - assert!(!peer_2_inv.inv.has_ith_microblock_stream(i)); - } - peer_2_check = true; + if let Some(ref inv) = peer_1.network.inv_state { + info!("Peer 1 stats: {:?}", &inv.block_stats); + assert_eq!(inv.get_broken_peers().len(), 0); + assert_eq!(inv.get_dead_peers().len(), 0); + assert_eq!(inv.get_diverged_peers().len(), 0); + + if let Some(peer_2_inv) = inv.block_stats.get(&peer_2.to_neighbor().addr) { + if peer_2_inv.inv.num_sortitions + == first_stacks_block_height - peer_1.config.burnchain.first_block_height + { + for i in 0..first_stacks_block_height { + assert!(!peer_2_inv.inv.has_ith_block(i)); + assert!(!peer_2_inv.inv.has_ith_microblock_stream(i)); } + peer_2_check = true; } } - None => {} } - match peer_2.network.inv_state { - Some(ref inv) => { - info!("Peer 2 stats: {:?}", &inv.block_stats); - assert_eq!(inv.get_broken_peers().len(), 
0); - assert_eq!(inv.get_dead_peers().len(), 0); - assert_eq!(inv.get_diverged_peers().len(), 0); - - if let Some(peer_1_inv) = inv.block_stats.get(&peer_1.to_neighbor().addr) { - if peer_1_inv.inv.num_sortitions - == first_stacks_block_height - - peer_1.config.burnchain.first_block_height - { - peer_1_check = true; - } + if let Some(ref inv) = peer_2.network.inv_state { + info!("Peer 2 stats: {:?}", &inv.block_stats); + assert_eq!(inv.get_broken_peers().len(), 0); + assert_eq!(inv.get_dead_peers().len(), 0); + assert_eq!(inv.get_diverged_peers().len(), 0); + + if let Some(peer_1_inv) = inv.block_stats.get(&peer_1.to_neighbor().addr) { + if peer_1_inv.inv.num_sortitions + == first_stacks_block_height - peer_1.config.burnchain.first_block_height + { + peer_1_check = true; } } - None => {} } round += 1; @@ -1703,54 +1689,48 @@ fn test_sync_inv_2_peers_unstable() { None => 0, }; - match peer_1.network.inv_state { - Some(ref inv) => { - info!("Peer 1 stats: {:?}", &inv.block_stats); - assert_eq!(inv.get_broken_peers().len(), 0); - assert_eq!(inv.get_dead_peers().len(), 0); - assert_eq!(inv.get_diverged_peers().len(), 0); + if let Some(ref inv) = peer_1.network.inv_state { + info!("Peer 1 stats: {:?}", &inv.block_stats); + assert_eq!(inv.get_broken_peers().len(), 0); + assert_eq!(inv.get_dead_peers().len(), 0); + assert_eq!(inv.get_diverged_peers().len(), 0); - if let Some(stats) = inv.get_stats(&peer_2.to_neighbor().addr) { - if stats.target_pox_reward_cycle > 0 { - peer_1_pox_cycle_start = true; - } - if stats.target_block_reward_cycle > 0 { - peer_1_block_cycle_start = true; - } - if stats.target_pox_reward_cycle == 0 && peer_1_pox_cycle_start { - peer_1_pox_cycle = true; - } - if stats.target_block_reward_cycle == 0 && peer_1_block_cycle_start { - peer_1_block_cycle = true; - } + if let Some(stats) = inv.get_stats(&peer_2.to_neighbor().addr) { + if stats.target_pox_reward_cycle > 0 { + peer_1_pox_cycle_start = true; + } + if stats.target_block_reward_cycle > 0 { + peer_1_block_cycle_start = true; + } + if stats.target_pox_reward_cycle == 0 && peer_1_pox_cycle_start { + peer_1_pox_cycle = true; + } + if stats.target_block_reward_cycle == 0 && peer_1_block_cycle_start { + peer_1_block_cycle = true; } } - None => {} } - match peer_2.network.inv_state { - Some(ref inv) => { - info!("Peer 2 stats: {:?}", &inv.block_stats); - assert_eq!(inv.get_broken_peers().len(), 0); - assert_eq!(inv.get_dead_peers().len(), 0); - assert_eq!(inv.get_diverged_peers().len(), 0); + if let Some(ref inv) = peer_2.network.inv_state { + info!("Peer 2 stats: {:?}", &inv.block_stats); + assert_eq!(inv.get_broken_peers().len(), 0); + assert_eq!(inv.get_dead_peers().len(), 0); + assert_eq!(inv.get_diverged_peers().len(), 0); - if let Some(stats) = inv.get_stats(&peer_1.to_neighbor().addr) { - if stats.target_pox_reward_cycle > 0 { - peer_2_pox_cycle_start = true; - } - if stats.target_block_reward_cycle > 0 { - peer_2_block_cycle_start = true; - } - if stats.target_pox_reward_cycle == 0 && peer_2_pox_cycle_start { - peer_2_pox_cycle = true; - } - if stats.target_block_reward_cycle == 0 && peer_2_block_cycle_start { - peer_2_block_cycle = true; - } + if let Some(stats) = inv.get_stats(&peer_1.to_neighbor().addr) { + if stats.target_pox_reward_cycle > 0 { + peer_2_pox_cycle_start = true; + } + if stats.target_block_reward_cycle > 0 { + peer_2_block_cycle_start = true; + } + if stats.target_pox_reward_cycle == 0 && peer_2_pox_cycle_start { + peer_2_pox_cycle = true; + } + if stats.target_block_reward_cycle == 0 && 
peer_2_block_cycle_start { + peer_2_block_cycle = true; } } - None => {} } round += 1; @@ -1917,42 +1897,30 @@ fn test_sync_inv_2_peers_different_pox_vectors() { let _ = peer_2.step(); // peer 1 should see that peer 2 has all blocks for reward cycles 5 through 9 - match peer_1.network.inv_state { - Some(ref inv) => { - inv_1_count = inv.get_inv_num_blocks(&peer_2.to_neighbor().addr); - peer_1_sorts = inv.get_inv_sortitions(&peer_2.to_neighbor().addr); - } - None => {} + if let Some(ref inv) = peer_1.network.inv_state { + inv_1_count = inv.get_inv_num_blocks(&peer_2.to_neighbor().addr); + peer_1_sorts = inv.get_inv_sortitions(&peer_2.to_neighbor().addr); }; // peer 2 should see that peer 1 has all blocks up to where we stopped feeding them to // it - match peer_2.network.inv_state { - Some(ref inv) => { - inv_2_count = inv.get_inv_num_blocks(&peer_1.to_neighbor().addr); - peer_2_sorts = inv.get_inv_sortitions(&peer_1.to_neighbor().addr); - } - None => {} + if let Some(ref inv) = peer_2.network.inv_state { + inv_2_count = inv.get_inv_num_blocks(&peer_1.to_neighbor().addr); + peer_2_sorts = inv.get_inv_sortitions(&peer_1.to_neighbor().addr); }; - match peer_1.network.inv_state { - Some(ref inv) => { - info!("Peer 1 stats: {:?}", &inv.block_stats); - assert_eq!(inv.get_broken_peers().len(), 0); - assert_eq!(inv.get_dead_peers().len(), 0); - assert_eq!(inv.get_diverged_peers().len(), 0); - } - None => {} + if let Some(ref inv) = peer_1.network.inv_state { + info!("Peer 1 stats: {:?}", &inv.block_stats); + assert_eq!(inv.get_broken_peers().len(), 0); + assert_eq!(inv.get_dead_peers().len(), 0); + assert_eq!(inv.get_diverged_peers().len(), 0); } - match peer_2.network.inv_state { - Some(ref inv) => { - info!("Peer 2 stats: {:?}", &inv.block_stats); - assert_eq!(inv.get_broken_peers().len(), 0); - assert_eq!(inv.get_dead_peers().len(), 0); - assert_eq!(inv.get_diverged_peers().len(), 0); - } - None => {} + if let Some(ref inv) = peer_2.network.inv_state { + info!("Peer 2 stats: {:?}", &inv.block_stats); + assert_eq!(inv.get_broken_peers().len(), 0); + assert_eq!(inv.get_dead_peers().len(), 0); + assert_eq!(inv.get_diverged_peers().len(), 0); } round += 1; diff --git a/stackslib/src/net/tests/inv/nakamoto.rs b/stackslib/src/net/tests/inv/nakamoto.rs index a2483fa052..220c671f0c 100644 --- a/stackslib/src/net/tests/inv/nakamoto.rs +++ b/stackslib/src/net/tests/inv/nakamoto.rs @@ -142,7 +142,7 @@ pub fn peer_get_nakamoto_invs<'a>( loop { peer.step_with_ibd(false).unwrap(); - if let Ok(..) 
= shutdown_recv.try_recv() { + if shutdown_recv.try_recv().is_ok() { break; } } @@ -1085,22 +1085,16 @@ fn test_nakamoto_inv_sync_across_epoch_change() { .unwrap_or(0); // nothing should break - match peer.network.inv_state { - Some(ref inv) => { - assert_eq!(inv.get_broken_peers().len(), 0); - assert_eq!(inv.get_dead_peers().len(), 0); - assert_eq!(inv.get_diverged_peers().len(), 0); - } - None => {} + if let Some(ref inv) = peer.network.inv_state { + assert_eq!(inv.get_broken_peers().len(), 0); + assert_eq!(inv.get_dead_peers().len(), 0); + assert_eq!(inv.get_diverged_peers().len(), 0); } - match other_peer.network.inv_state { - Some(ref inv) => { - assert_eq!(inv.get_broken_peers().len(), 0); - assert_eq!(inv.get_dead_peers().len(), 0); - assert_eq!(inv.get_diverged_peers().len(), 0); - } - None => {} + if let Some(ref inv) = other_peer.network.inv_state { + assert_eq!(inv.get_broken_peers().len(), 0); + assert_eq!(inv.get_dead_peers().len(), 0); + assert_eq!(inv.get_diverged_peers().len(), 0); } round += 1; diff --git a/stackslib/src/net/tests/neighbors.rs b/stackslib/src/net/tests/neighbors.rs index f1e3fa76cb..8c56b48b0d 100644 --- a/stackslib/src/net/tests/neighbors.rs +++ b/stackslib/src/net/tests/neighbors.rs @@ -68,20 +68,14 @@ fn test_step_walk_1_neighbor_plain() { walk_2_count ); - match peer_1.network.walk { - Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); - } - None => {} + if let Some(ref w) = peer_1.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + assert_eq!(w.result.replaced_neighbors.len(), 0); }; - match peer_2.network.walk { - Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); - } - None => {} + if let Some(ref w) = peer_2.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + assert_eq!(w.result.replaced_neighbors.len(), 0); }; i += 1; @@ -184,22 +178,16 @@ fn test_step_walk_1_neighbor_plain_no_natpunch() { walk_2_count ); - match peer_1.network.walk { - Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.dead_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); - } - None => {} + if let Some(ref w) = peer_1.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + assert_eq!(w.result.dead_connections.len(), 0); + assert_eq!(w.result.replaced_neighbors.len(), 0); }; - match peer_2.network.walk { - Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.dead_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); - } - None => {} + if let Some(ref w) = peer_2.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + assert_eq!(w.result.dead_connections.len(), 0); + assert_eq!(w.result.replaced_neighbors.len(), 0); }; if let Some(s) = peer_1 @@ -306,20 +294,14 @@ fn test_step_walk_1_neighbor_denied() { walk_1_retries = peer_1.network.walk_retries; walk_2_retries = peer_2.network.walk_retries; - match peer_1.network.walk { - Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); - } - None => {} + if let Some(ref w) = peer_1.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + assert_eq!(w.result.replaced_neighbors.len(), 0); }; - match peer_2.network.walk { - Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); - } 
- None => {} + if let Some(ref w) = peer_2.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + assert_eq!(w.result.replaced_neighbors.len(), 0); }; i += 1; @@ -400,20 +382,14 @@ fn test_step_walk_1_neighbor_bad_epoch() { walk_1_retries = peer_1.network.walk_attempts; walk_2_retries = peer_2.network.walk_attempts; - match peer_1.network.walk { - Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); - } - None => {} + if let Some(ref w) = peer_1.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + assert_eq!(w.result.replaced_neighbors.len(), 0); }; - match peer_2.network.walk { - Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); - } - None => {} + if let Some(ref w) = peer_2.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + assert_eq!(w.result.replaced_neighbors.len(), 0); }; i += 1; @@ -463,20 +439,14 @@ fn test_step_walk_1_neighbor_heartbeat_ping() { walk_2_count ); - match peer_1.network.walk { - Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); - } - None => {} + if let Some(ref w) = peer_1.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + assert_eq!(w.result.replaced_neighbors.len(), 0); }; - match peer_2.network.walk { - Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); - } - None => {} + if let Some(ref w) = peer_2.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + assert_eq!(w.result.replaced_neighbors.len(), 0); }; i += 1; @@ -573,23 +543,17 @@ fn test_step_walk_1_neighbor_bootstrapping() { walk_2_count ); - match peer_1.network.walk { - Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); + if let Some(ref w) = peer_1.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + assert_eq!(w.result.replaced_neighbors.len(), 0); - // peer 2 never gets added to peer 1's frontier - assert!(!w.frontier.contains_key(&neighbor_2.addr)); - } - None => {} + // peer 2 never gets added to peer 1's frontier + assert!(!w.frontier.contains_key(&neighbor_2.addr)); }; - match peer_2.network.walk { - Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); - } - None => {} + if let Some(ref w) = peer_2.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + assert_eq!(w.result.replaced_neighbors.len(), 0); }; i += 1; @@ -657,23 +621,17 @@ fn test_step_walk_1_neighbor_behind() { walk_2_count ); - match peer_1.network.walk { - Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); - } - None => {} + if let Some(ref w) = peer_1.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + assert_eq!(w.result.replaced_neighbors.len(), 0); }; - match peer_2.network.walk { - Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); + if let Some(ref w) = peer_2.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + assert_eq!(w.result.replaced_neighbors.len(), 0); - // peer 1 never gets added to peer 2's frontier - assert!(!w.frontier.contains_key(&neighbor_1.addr)); - } - None => {} + // peer 1 never gets added to peer 2's frontier + 
assert!(!w.frontier.contains_key(&neighbor_1.addr)); }; i += 1; @@ -789,20 +747,14 @@ fn test_step_walk_10_neighbors_of_neighbor_plain() { walk_2_count ); - match peer_1.network.walk { - Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); - } - None => {} + if let Some(ref w) = peer_1.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + assert_eq!(w.result.replaced_neighbors.len(), 0); }; - match peer_2.network.walk { - Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); - } - None => {} + if let Some(ref w) = peer_2.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + assert_eq!(w.result.replaced_neighbors.len(), 0); }; i += 1; @@ -942,20 +894,14 @@ fn test_step_walk_10_neighbors_of_neighbor_bootstrapping() { walk_2_count ); - match peer_1.network.walk { - Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); - } - None => {} + if let Some(ref w) = peer_1.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + assert_eq!(w.result.replaced_neighbors.len(), 0); }; - match peer_2.network.walk { - Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); - } - None => {} + if let Some(ref w) = peer_2.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + assert_eq!(w.result.replaced_neighbors.len(), 0); }; steps += 1; @@ -1091,20 +1037,14 @@ fn test_step_walk_2_neighbors_plain() { walk_2_count ); - match peer_1.network.walk { - Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); - } - None => {} + if let Some(ref w) = peer_1.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + assert_eq!(w.result.replaced_neighbors.len(), 0); }; - match peer_2.network.walk { - Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); - } - None => {} + if let Some(ref w) = peer_2.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + assert_eq!(w.result.replaced_neighbors.len(), 0); }; i += 1; @@ -1371,28 +1311,19 @@ fn test_step_walk_3_neighbors_inbound() { ); test_debug!("========"); - match peer_1.network.walk { - Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); - } - None => {} + if let Some(ref w) = peer_1.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + assert_eq!(w.result.replaced_neighbors.len(), 0); }; - match peer_2.network.walk { - Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); - } - None => {} + if let Some(ref w) = peer_2.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + assert_eq!(w.result.replaced_neighbors.len(), 0); }; - match peer_3.network.walk { - Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); - } - None => {} + if let Some(ref w) = peer_3.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + assert_eq!(w.result.replaced_neighbors.len(), 0); }; for (i, peer) in [&peer_1, &peer_2, &peer_3].iter().enumerate() { @@ -1542,20 +1473,14 @@ fn test_step_walk_2_neighbors_rekey() { let _ = peer_1.step(); let _ = peer_2.step(); - match 
peer_1.network.walk { - Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); - } - None => {} + if let Some(ref w) = peer_1.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + assert_eq!(w.result.replaced_neighbors.len(), 0); }; - match peer_2.network.walk { - Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); - } - None => {} + if let Some(ref w) = peer_2.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + assert_eq!(w.result.replaced_neighbors.len(), 0); }; } @@ -1649,20 +1574,14 @@ fn test_step_walk_2_neighbors_different_networks() { walk_2_count ); - match peer_1.network.walk { - Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); - } - None => {} + if let Some(ref w) = peer_1.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + assert_eq!(w.result.replaced_neighbors.len(), 0); }; - match peer_2.network.walk { - Some(ref w) => { - assert_eq!(w.result.broken_connections.len(), 0); - assert_eq!(w.result.replaced_neighbors.len(), 0); - } - None => {} + if let Some(ref w) = peer_2.network.walk { + assert_eq!(w.result.broken_connections.len(), 0); + assert_eq!(w.result.replaced_neighbors.len(), 0); }; i += 1; diff --git a/stackslib/src/net/tests/relay/epoch2x.rs b/stackslib/src/net/tests/relay/epoch2x.rs index 9d3de7aacd..2729d648eb 100644 --- a/stackslib/src/net/tests/relay/epoch2x.rs +++ b/stackslib/src/net/tests/relay/epoch2x.rs @@ -1695,23 +1695,17 @@ fn test_get_blocks_and_microblocks_2_peers_push_transactions() { let mut peer_0_to_1 = false; let mut peer_1_to_0 = false; for (nk, event_id) in peers[0].network.events.iter() { - match peers[0].network.peers.get(event_id) { - Some(convo) => { - if *nk == peer_1_nk { - peer_0_to_1 = true; - } + if let Some(convo) = peers[0].network.peers.get(event_id) { + if *nk == peer_1_nk { + peer_0_to_1 = true; } - None => {} } } for (nk, event_id) in peers[1].network.events.iter() { - match peers[1].network.peers.get(event_id) { - Some(convo) => { - if *nk == peer_0_nk { - peer_1_to_0 = true; - } + if let Some(convo) = peers[1].network.peers.get(event_id) { + if *nk == peer_0_nk { + peer_1_to_0 = true; } - None => {} } } @@ -3722,17 +3716,14 @@ fn test_block_versioned_smart_contract_mempool_rejection_until_v210() { // tenure 28 let versioned_contract = (*versioned_contract_opt.borrow()).clone().unwrap(); let versioned_contract_len = versioned_contract.serialize_to_vec().len(); - match node.chainstate.will_admit_mempool_tx( + if let Err(e) = node.chainstate.will_admit_mempool_tx( &sortdb.index_handle(&tip.sortition_id), &consensus_hash, &stacks_block.block_hash(), &versioned_contract, versioned_contract_len as u64, ) { - Err(e) => { - panic!("will_admit_mempool_tx {:?}", &e); - } - Ok(_) => {} + panic!("will_admit_mempool_tx {:?}", &e); }; peer.sortdb = Some(sortdb); diff --git a/stackslib/src/net/unsolicited.rs b/stackslib/src/net/unsolicited.rs index 1dbd3d7c37..6df40eaebe 100644 --- a/stackslib/src/net/unsolicited.rs +++ b/stackslib/src/net/unsolicited.rs @@ -481,21 +481,18 @@ impl PeerNetwork { if need_block { // have the downloader request this block if it's new and we don't have it - match self.block_downloader { - Some(ref mut downloader) => { - downloader.hint_block_sortition_height_available( - block_sortition_height, - ibd, - need_block, - ); + if let Some(ref mut downloader) = 
self.block_downloader { + downloader.hint_block_sortition_height_available( + block_sortition_height, + ibd, + need_block, + ); - // advance straight to download state if we're in inv state - if self.work_state == PeerNetworkWorkState::BlockInvSync { - debug!("{:?}: advance directly to block download with knowledge of block sortition {}", &self.get_local_peer(), block_sortition_height); - } - self.have_data_to_download = true; + // advance straight to download state if we're in inv state + if self.work_state == PeerNetworkWorkState::BlockInvSync { + debug!("{:?}: advance directly to block download with knowledge of block sortition {}", &self.get_local_peer(), block_sortition_height); } - None => {} + self.have_data_to_download = true; } } } diff --git a/stackslib/src/util_lib/mod.rs b/stackslib/src/util_lib/mod.rs index 87031676db..af9a4d98a7 100644 --- a/stackslib/src/util_lib/mod.rs +++ b/stackslib/src/util_lib/mod.rs @@ -32,13 +32,10 @@ pub mod test { let mut done = false; while get_epoch_time_secs() <= deadline { sleep_ms(1000); - match rx.try_recv() { - Ok(success) => { - assert!(success); - done = true; - break; - } - Err(_) => {} + if let Ok(success) = rx.try_recv() { + assert!(success); + done = true; + break; } } diff --git a/testnet/stacks-node/src/nakamoto_node/miner.rs b/testnet/stacks-node/src/nakamoto_node/miner.rs index fb233737bb..16b33ead7a 100644 --- a/testnet/stacks-node/src/nakamoto_node/miner.rs +++ b/testnet/stacks-node/src/nakamoto_node/miner.rs @@ -21,6 +21,7 @@ use std::thread; use std::time::{Duration, Instant}; use clarity::boot_util::boot_code_id; +use clarity::vm::costs::ExecutionCost; use clarity::vm::types::PrincipalData; use libsigner::v0::messages::{MinerSlotID, SignerMessage}; use libsigner::StackerDBSession; @@ -177,6 +178,10 @@ pub struct BlockMinerThread { last_block_mined: Option<NakamotoBlock>, /// Number of blocks mined since a tenure change/extend was attempted mined_blocks: u64, + /// Cost consumed by the current tenure + tenure_cost: ExecutionCost, + /// Cost budget for the current tenure + tenure_budget: ExecutionCost, /// Copy of the node's registered VRF key registered_key: RegisteredKey, /// Burnchain block snapshot which elected this miner @@ -237,6 +242,8 @@ impl BlockMinerThread { burn_tip_at_start: burn_tip_at_start.clone(), tenure_change_time: Instant::now(), abort_flag: Arc::new(AtomicBool::new(false)), + tenure_cost: ExecutionCost::ZERO, + tenure_budget: ExecutionCost::ZERO, } } @@ -1183,7 +1190,7 @@ impl BlockMinerThread { } // build the block itself - let (mut block, consumed, size, tx_events) = NakamotoBlockBuilder::build_nakamoto_block( + let mut block_metadata = NakamotoBlockBuilder::build_nakamoto_block( &chain_state, &burn_db .index_handle_at_ch(&self.burn_block.consensus_hash) @@ -1210,39 +1217,48 @@ impl BlockMinerThread { e })?; - if block.txs.is_empty() { + if block_metadata.block.txs.is_empty() { return Err(ChainstateError::NoTransactionsToMine.into()); } let mining_key = self.keychain.get_nakamoto_sk(); let miner_signature = mining_key - .sign(block.header.miner_signature_hash().as_bytes()) + .sign( + block_metadata + .block + .header + .miner_signature_hash() + .as_bytes(), + ) .map_err(NakamotoNodeError::MinerSignatureError)?; - block.header.miner_signature = miner_signature; + block_metadata.block.header.miner_signature = miner_signature; info!( "Miner: Assembled block #{} for signer set proposal: {}, with {} txs", - block.header.chain_length, - block.header.block_hash(), - block.txs.len(); - "signer_sighash" => 
%block.header.signer_signature_hash(), - "consensus_hash" => %block.header.consensus_hash, - "parent_block_id" => %block.header.parent_block_id, - "timestamp" => block.header.timestamp, + block_metadata.block.header.chain_length, + block_metadata.block.header.block_hash(), + block_metadata.block.txs.len(); + "signer_sighash" => %block_metadata.block.header.signer_signature_hash(), + "consensus_hash" => %block_metadata.block.header.consensus_hash, + "parent_block_id" => %block_metadata.block.header.parent_block_id, + "timestamp" => block_metadata.block.header.timestamp, ); self.event_dispatcher.process_mined_nakamoto_block_event( self.burn_block.block_height, - &block, - size, - &consumed, - tx_events, + &block_metadata.block, + block_metadata.tenure_size, + &block_metadata.tenure_consumed, + block_metadata.tx_events, ); + self.tenure_cost = block_metadata.tenure_consumed; + self.tenure_budget = block_metadata.tenure_budget; + // last chance -- confirm that the stacks tip is unchanged (since it could have taken long // enough to build this block that another block could have arrived), and confirm that all // Stacks blocks with heights higher than the canonical tip are processed. self.check_burn_tip_changed(&burn_db)?; - Ok(block) + Ok(block_metadata.block) } #[cfg_attr(test, mutants::skip)] @@ -1273,8 +1289,20 @@ impl BlockMinerThread { } } }; + // Check if we can and should include a time-based tenure extend. if self.last_block_mined.is_some() { - // Check if we can extend the current tenure + // Do not extend if we have spent less than the configured threshold + // of the tenure budget (50% by default), since it is not necessary. + let usage = self + .tenure_budget + .proportion_largest_dimension(&self.tenure_cost); + if usage < self.config.miner.tenure_extend_cost_threshold { + return Ok(NakamotoTenureInfo { + coinbase_tx: None, + tenure_change_tx: None, + }); + } + let tenure_extend_timestamp = coordinator.get_tenure_extend_timestamp(); if get_epoch_time_secs() <= tenure_extend_timestamp && self.tenure_change_time.elapsed() <= self.config.miner.tenure_timeout { return Ok(NakamotoTenureInfo { coinbase_tx: None, tenure_change_tx: None, }); } + info!("Miner: Time-based tenure extend"; "current_timestamp" => get_epoch_time_secs(), "tenure_extend_timestamp" => tenure_extend_timestamp, diff --git a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs index 58d5f2da89..2138b7e767 100644 --- a/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs +++ b/testnet/stacks-node/src/nakamoto_node/signer_coordinator.rs @@ -13,9 +13,12 @@ // You should have received a copy of the GNU General Public License // along with this program. If not, see <http://www.gnu.org/licenses/>. +use std::collections::BTreeMap; +use std::ops::Bound::Included; use std::sync::atomic::AtomicBool; use std::sync::{Arc, Mutex}; use std::thread::JoinHandle; +use std::time::{Duration, Instant}; use libsigner::v0::messages::{MinerSlotID, SignerMessage as SignerMessageV0}; use libsigner::{BlockProposal, SignerSession, StackerDBSession}; @@ -66,6 +69,8 @@ pub struct SignerCoordinator { /// Rather, this burn block is used to determine whether or not a new /// burn block has arrived since this thread started. 
burn_tip_at_start: ConsensusHash, + /// The timeout configuration based on the percentage of rejections + block_rejection_timeout_steps: BTreeMap<u32, Duration>, } impl SignerCoordinator { @@ -101,6 +106,14 @@ impl SignerCoordinator { let miners_contract_id = boot_code_id(MINERS_NAME, is_mainnet); let miners_session = StackerDBSession::new(&rpc_socket.to_string(), miners_contract_id); + // build a BTreeMap of the various timeout steps + let mut block_rejection_timeout_steps = BTreeMap::<u32, Duration>::new(); + for (percentage, duration) in config.miner.block_rejection_timeout_steps.iter() { + let rejections_amount = + ((f64::from(listener.total_weight) / 100.0) * f64::from(*percentage)) as u32; + block_rejection_timeout_steps.insert(rejections_amount, *duration); + } + let mut sc = Self { message_key, is_mainnet, @@ -111,6 +124,7 @@ impl SignerCoordinator { keep_running, listener_thread: None, burn_tip_at_start: burn_tip_at_start.clone(), + block_rejection_timeout_steps, }; // Spawn the signer DB listener thread @@ -293,16 +307,38 @@ impl SignerCoordinator { sortdb: &SortitionDB, counters: &Counters, ) -> Result<Vec<MessageSignature>, NakamotoNodeError> { + // the current number of rejections (used to adjust the timeout) + let mut rejections: u32 = 0; + // default timeout (the 0 entry must always be present) + let mut rejections_timeout = self .block_rejection_timeout_steps .get(&rejections) .ok_or_else(|| { NakamotoNodeError::SigningCoordinatorFailure( "Invalid rejection timeout step function definition".into(), ) })?; + + // this is used to track the start of the waiting cycle + let rejections_timer = Instant::now(); loop { + // At every iteration, wait for the block_status. + // Exit when the number of confirmations/rejections reaches the threshold (or on timeout). + // The timeout is adjusted whenever the number of rejections changes. let block_status = match self.stackerdb_comms.wait_for_block_status( block_signer_sighash, EVENT_RECEIVER_POLL, |status| { - status.total_weight_signed < self.weight_threshold - && status .total_reject_weight .saturating_add(self.weight_threshold) - <= self.total_weight + // rejections-based timeout expired? + if rejections_timer.elapsed() > *rejections_timeout { + return false; + } + // number of rejections changed? + if status.total_reject_weight != rejections { + return false; + } + // enough signatures? + return status.total_weight_signed < self.weight_threshold; }, )? 
{ Some(status) => status, @@ -336,10 +372,44 @@ impl SignerCoordinator { return Err(NakamotoNodeError::BurnchainTipChanged); } + if rejections_timer.elapsed() > *rejections_timeout { + warn!("Timed out while waiting for responses from signers"; + "elapsed" => rejections_timer.elapsed().as_secs(), + "rejections_timeout" => rejections_timeout.as_secs(), + "rejections" => rejections, + "rejections_threshold" => self.total_weight.saturating_sub(self.weight_threshold) + ); + return Err(NakamotoNodeError::SigningCoordinatorFailure( + "Timed out while waiting for signatures".into(), + )); + } + continue; } }; + if rejections != block_status.total_reject_weight { + rejections = block_status.total_reject_weight; + let (rejections_step, new_rejections_timeout) = self + .block_rejection_timeout_steps + .range((Included(0), Included(rejections))) + .last() + .ok_or_else(|| { + NakamotoNodeError::SigningCoordinatorFailure( + "Invalid rejection timeout step function definition".into(), + ) + })?; + rejections_timeout = new_rejections_timeout; + info!("Number of received rejections updated, resetting timeout"; + "rejections" => rejections, + "rejections_timeout" => rejections_timeout.as_secs(), + "rejections_step" => rejections_step, + "rejections_threshold" => self.total_weight.saturating_sub(self.weight_threshold)); + + counters.set_miner_current_rejections_timeout_secs(rejections_timeout.as_secs()); + counters.set_miner_current_rejections(rejections); + } + if block_status .total_reject_weight .saturating_add(self.weight_threshold) <= self.total_weight { "block_signer_sighash" => %block_signer_sighash, ); return Ok(block_status.gathered_signatures.values().cloned().collect()); - } else { + } else if rejections_timer.elapsed() > *rejections_timeout { + warn!("Timed out while waiting for responses from signers"; + "elapsed" => rejections_timer.elapsed().as_secs(), + "rejections_timeout" => rejections_timeout.as_secs(), + "rejections" => rejections, + "rejections_threshold" => self.total_weight.saturating_sub(self.weight_threshold) + ); return Err(NakamotoNodeError::SigningCoordinatorFailure( - "Unblocked without reaching the threshold".into(), + "Timed out while waiting for signatures".into(), )); + } else { + continue; + } } } diff --git a/testnet/stacks-node/src/run_loop/neon.rs b/testnet/stacks-node/src/run_loop/neon.rs index 0c64444017..299335f35f 100644 --- a/testnet/stacks-node/src/run_loop/neon.rs +++ b/testnet/stacks-node/src/run_loop/neon.rs @@ -118,6 +118,9 @@ pub struct Counters { pub naka_miner_directives: RunLoopCounter, pub naka_submitted_commit_last_stacks_tip: RunLoopCounter, + pub naka_miner_current_rejections: RunLoopCounter, + pub naka_miner_current_rejections_timeout_secs: RunLoopCounter, + #[cfg(test)] pub naka_skip_commit_op: TestFlag<bool>, } @@ -214,6 +217,14 @@ impl Counters { pub fn set_microblocks_processed(&self, value: u64) { Counters::set(&self.microblocks_processed, value) } + + pub fn set_miner_current_rejections_timeout_secs(&self, value: u64) { + Counters::set(&self.naka_miner_current_rejections_timeout_secs, value) + } + + pub fn set_miner_current_rejections(&self, value: u32) { + Counters::set(&self.naka_miner_current_rejections, u64::from(value)) + } } /// Coordinating a node running in neon mode. 
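The rejection-timeout logic above treats `block_rejection_timeout_steps` as a step function: keys are absolute rejection weights (derived from the configured percentages of the signers' total weight), and the active timeout is the entry with the largest key at or below the current rejection weight, selected via `range((Included(0), Included(rejections))).last()`. A self-contained sketch of that lookup; the percentages and durations below are illustrative, not the shipped defaults:

use std::collections::BTreeMap;
use std::ops::Bound::Included;
use std::time::Duration;

fn main() {
    let total_weight: u32 = 100;
    // (percentage, timeout) pairs as they might appear in a miner config;
    // the 0 entry supplies the default timeout and must always be present.
    let config_steps = [
        (0u32, Duration::from_secs(600)),
        (10, Duration::from_secs(300)),
        (20, Duration::from_secs(150)),
        (30, Duration::from_secs(0)),
    ];

    // Convert percentages into absolute rejection weights, as the
    // coordinator does when it builds the BTreeMap.
    let mut steps: BTreeMap<u32, Duration> = BTreeMap::new();
    for (percentage, duration) in config_steps {
        let rejections_amount =
            ((f64::from(total_weight) / 100.0) * f64::from(percentage)) as u32;
        steps.insert(rejections_amount, duration);
    }

    // Pick the last step at or below the observed rejection weight.
    let rejections: u32 = 17;
    let (step, timeout) = steps
        .range((Included(0), Included(rejections)))
        .last()
        .expect("the 0 entry guarantees a match");
    assert_eq!((*step, *timeout), (10, Duration::from_secs(300)));
}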
diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs index 8e7132df49..3fa540d64f 100644 --- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs +++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs @@ -694,7 +694,7 @@ where error!("Timed out waiting for check to process"); return Err("Timed out".into()); } - thread::sleep(Duration::from_millis(100)); + thread::sleep(Duration::from_millis(500)); } Ok(()) } @@ -729,14 +729,14 @@ pub fn next_block_and_process_new_stacks_block( pub fn next_block_and_mine_commit( btc_controller: &mut BitcoinRegtestController, timeout_secs: u64, - coord_channels: &Arc<Mutex<CoordinatorChannels>>, - commits_submitted: &Arc<AtomicU64>, + node_conf: &Config, + node_counters: &Counters, ) -> Result<(), String> { next_block_and_wait_for_commits( btc_controller, timeout_secs, - &[coord_channels], - &[commits_submitted], + &[node_conf], + &[node_counters], true, ) } @@ -746,14 +746,14 @@ pub fn next_block_and_mine_commit( pub fn next_block_and_commits_only( btc_controller: &mut BitcoinRegtestController, timeout_secs: u64, - coord_channels: &Arc<Mutex<CoordinatorChannels>>, - commits_submitted: &Arc<AtomicU64>, + node_conf: &Config, + node_counters: &Counters, ) -> Result<(), String> { next_block_and_wait_for_commits( btc_controller, timeout_secs, - &[coord_channels], - &[commits_submitted], + &[node_conf], + &[node_counters], false, ) } @@ -766,98 +766,47 @@ pub fn next_block_and_commits_only( pub fn next_block_and_wait_for_commits( btc_controller: &mut BitcoinRegtestController, timeout_secs: u64, - coord_channels: &[&Arc<Mutex<CoordinatorChannels>>], - commits_submitted: &[&Arc<AtomicU64>], + node_confs: &[&Config], + node_counters: &[&Counters], wait_for_stacks_block: bool, ) -> Result<(), String> { - let commits_submitted: Vec<_> = commits_submitted.to_vec(); - let blocks_processed_before: Vec<_> = coord_channels + let infos_before: Vec<_> = node_confs.iter().map(|c| get_chain_info(c)).collect(); + let burn_ht_before = infos_before .iter() - .map(|x| { x.lock() .expect("Mutex poisoned") .get_stacks_blocks_processed() }) - .collect(); - let commits_before: Vec<_> = commits_submitted + .map(|info| info.burn_block_height) + .max() + .unwrap(); + let stacks_ht_before = infos_before .iter() - .map(|x| x.load(Ordering::SeqCst)) - .collect(); + .map(|info| info.stacks_tip_height) + .max() + .unwrap(); + let last_commit_burn_hts = node_counters + .iter() + .map(|c| &c.naka_submitted_commit_last_burn_height); + let last_commit_stacks_hts = node_counters + .iter() + .map(|c| &c.naka_submitted_commit_last_stacks_tip); - let mut block_processed_time: Vec<Option<Instant>> = vec![None; commits_before.len()]; - let mut commit_sent_time: Vec<Option<Instant>> = vec![None; commits_before.len()]; next_block_and(btc_controller, timeout_secs, || { - for i in 0..commits_submitted.len() { - let commits_sent = commits_submitted[i].load(Ordering::SeqCst); - let blocks_processed = coord_channels[i] - .lock() - .expect("Mutex poisoned") - .get_stacks_blocks_processed(); - let now = Instant::now(); - if blocks_processed > blocks_processed_before[i] && block_processed_time[i].is_none() { - block_processed_time[i].replace(now); - } - if commits_sent > commits_before[i] && commit_sent_time[i].is_none() { - commit_sent_time[i].replace(now); - } - } - + let burn_height_committed_to = + last_commit_burn_hts.clone().all(|last_commit_burn_height| { + last_commit_burn_height.load(Ordering::SeqCst) > burn_ht_before + }); if !wait_for_stacks_block { - for i in 0..commits_submitted.len() { - // just wait for the commit - let commits_sent = 
commits_submitted[i].load(Ordering::SeqCst); - if commits_sent <= commits_before[i] { - return Ok(false); - } - - // if two commits have been sent, one of them must have been after - if commits_sent >= commits_before[i] + 1 { - continue; - } - return Ok(false); - } - return Ok(true); - } - - // waiting for both commit and stacks block - for i in 0..commits_submitted.len() { - let blocks_processed = coord_channels[i] - .lock() - .expect("Mutex poisoned") - .get_stacks_blocks_processed(); - let commits_sent = commits_submitted[i].load(Ordering::SeqCst); - - if blocks_processed > blocks_processed_before[i] { - // either we don't care about the stacks block count, or the block count advanced. - // Check the block-commits. - let block_processed_time = block_processed_time[i] - .as_ref() - .ok_or("TEST-ERROR: Processed block time wasn't set")?; - if commits_sent <= commits_before[i] { - return Ok(false); - } - let commit_sent_time = commit_sent_time[i] - .as_ref() - .ok_or("TEST-ERROR: Processed commit time wasn't set")?; - // try to ensure the commit was sent after the block was processed - if commit_sent_time > block_processed_time { - continue; - } - // if two commits have been sent, one of them must have been after - if commits_sent >= commits_before[i] + 2 { - continue; - } - // otherwise, just timeout if the commit was sent and its been long enough - // for a new commit pass to have occurred - if block_processed_time.elapsed() > Duration::from_secs(10) { - continue; - } - return Ok(false); - } else { + Ok(burn_height_committed_to) + } else { + if !burn_height_committed_to { return Ok(false); } + let stacks_tip_committed_to = + last_commit_stacks_hts + .clone() + .all(|last_commit_stacks_height| { + last_commit_stacks_height.load(Ordering::SeqCst) > stacks_ht_before + }); + return Ok(stacks_tip_committed_to); } - Ok(true) }) } @@ -1562,6 +1511,7 @@ fn simple_neon_integration() { naka_proposed_blocks: proposals_submitted, .. } = run_loop.counters(); + let node_counters = run_loop.counters(); let coord_channel = run_loop.coordinator_channels(); @@ -1619,13 +1569,8 @@ fn simple_neon_integration() { // Mine 15 nakamoto tenures for _i in 0..15 { - next_block_and_mine_commit( - &mut btc_regtest_controller, - 60, - &coord_channel, - &commits_submitted, - ) - .unwrap(); + next_block_and_mine_commit(&mut btc_regtest_controller, 60, &naka_conf, &node_counters) + .unwrap(); } // Submit a TX @@ -1673,13 +1618,8 @@ fn simple_neon_integration() { // Mine 15 more nakamoto tenures for _i in 0..15 { - next_block_and_mine_commit( - &mut btc_regtest_controller, - 60, - &coord_channel, - &commits_submitted, - ) - .unwrap(); + next_block_and_mine_commit(&mut btc_regtest_controller, 60, &naka_conf, &node_counters) + .unwrap(); } // load the chain tip, and assert that it is a nakamoto block and at least 30 blocks have advanced in epoch 3 @@ -1796,17 +1736,17 @@ fn restarting_miner() { naka_proposed_blocks: proposals_submitted, .. } = run_loop.counters(); + let rl1_counters = run_loop.counters(); let coord_channel = run_loop.coordinator_channels(); let mut run_loop_2 = boot_nakamoto::BootRunLoop::new(naka_conf.clone()).unwrap(); let _run_loop_2_stopper = run_loop.get_termination_switch(); let Counters { blocks_processed: blocks_processed_2, - naka_submitted_commits: commits_submitted_2, naka_proposed_blocks: proposals_submitted_2, .. 
     } = run_loop_2.counters();
-    let coord_channel_2 = run_loop_2.coordinator_channels();
+    let rl2_counters = run_loop_2.counters();
 
     let run_loop_thread = thread::spawn(move || run_loop.start(None, 0));
     wait_for_runloop(&blocks_processed);
@@ -1848,13 +1788,8 @@
 
     // Mine 2 nakamoto tenures
     for _i in 0..2 {
-        next_block_and_mine_commit(
-            &mut btc_regtest_controller,
-            60,
-            &coord_channel,
-            &commits_submitted,
-        )
-        .unwrap();
+        next_block_and_mine_commit(&mut btc_regtest_controller, 60, &naka_conf, &rl1_counters)
+            .unwrap();
     }
 
     let last_tip = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb)
         .unwrap()
         .unwrap();
@@ -1898,13 +1833,16 @@
         let tip = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb)
             .unwrap()
            .unwrap();
-        Ok(tip.stacks_block_height > last_tip.stacks_block_height)
+        let stacks_tip_committed_to = rl2_counters
+            .naka_submitted_commit_last_stacks_tip
+            .load(Ordering::SeqCst);
+        Ok(tip.stacks_block_height > last_tip.stacks_block_height
+            && stacks_tip_committed_to > last_tip.stacks_block_height)
     })
     .unwrap_or_else(|e| {
         let tip = NakamotoChainState::get_canonical_block_header(chainstate.db(), &sortdb)
             .unwrap()
             .unwrap();
-
         error!(
             "Failed to get a new block after restart";
             "last_tip_height" => last_tip.stacks_block_height,
@@ -1917,13 +1855,8 @@
 
     // Mine 2 more nakamoto tenures
     for _i in 0..2 {
-        next_block_and_mine_commit(
-            &mut btc_regtest_controller,
-            60,
-            &coord_channel_2,
-            &commits_submitted_2,
-        )
-        .unwrap();
+        next_block_and_mine_commit(&mut btc_regtest_controller, 60, &naka_conf, &rl2_counters)
+            .unwrap();
     }
 
     // load the chain tip, and assert that it is a nakamoto block and at least 30 blocks have advanced in epoch 3
@@ -1970,6 +1903,7 @@
 
 #[test]
 #[ignore]
+#[allow(non_snake_case)]
 /// This test spins up a nakamoto-neon node.
 /// It starts in Epoch 2.0, mines with `neon_node` to Epoch 3.0,
 /// having flash blocks when epoch updates and expects everything to work normally,
@@ -1979,7 +1913,12 @@
 /// * 30 blocks are mined after 3.0 starts. This is enough to mine across 2 reward cycles
 /// * A transaction submitted to the mempool in 3.0 will be mined in 3.0
 /// * The final chain tip is a nakamoto block
-fn flash_blocks_on_epoch_3() {
+///
+/// NOTE: This test has been disabled because it's flaky, and we don't need to
+/// test the Epoch 3 transition since it's already happened
+///
+/// See issue [#5765](https://github.com/stacks-network/stacks-core/issues/5765) for details
+fn flash_blocks_on_epoch_3_FLAKY() {
     if env::var("BITCOIND_TEST") != Ok("1".into()) {
         return;
     }
@@ -2022,6 +1961,7 @@
         naka_proposed_blocks: proposals_submitted,
         ..
     } = run_loop.counters();
+    let counters = run_loop.counters();
 
     let coord_channel = run_loop.coordinator_channels();
@@ -2096,13 +2036,7 @@
 
     // Mine 15 nakamoto tenures
     for _i in 0..15 {
-        next_block_and_mine_commit(
-            &mut btc_regtest_controller,
-            60,
-            &coord_channel,
-            &commits_submitted,
-        )
-        .unwrap();
+        next_block_and_mine_commit(&mut btc_regtest_controller, 60, &naka_conf, &counters).unwrap();
     }
 
     // Submit a TX
@@ -2138,13 +2072,7 @@
 
     // Mine 15 more nakamoto tenures
     for _i in 0..15 {
-        next_block_and_mine_commit(
-            &mut btc_regtest_controller,
-            60,
-            &coord_channel,
-            &commits_submitted,
-        )
-        .unwrap();
+        next_block_and_mine_commit(&mut btc_regtest_controller, 60, &naka_conf, &counters).unwrap();
     }
 
     // load the chain tip, and assert that it is a nakamoto block and at least 30 blocks have advanced in epoch 3
@@ -2721,6 +2649,7 @@ fn correct_burn_outs() {
         naka_proposed_blocks: proposals_submitted,
         ..
     } = run_loop.counters();
+    let counters = run_loop.counters();
 
     let coord_channel = run_loop.coordinator_channels();
@@ -2893,12 +2822,9 @@ fn correct_burn_outs() {
         let prior_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn())
             .unwrap()
             .block_height;
-        if let Err(e) = next_block_and_mine_commit(
-            &mut btc_regtest_controller,
-            30,
-            &coord_channel,
-            &commits_submitted,
-        ) {
+        if let Err(e) =
+            next_block_and_mine_commit(&mut btc_regtest_controller, 30, &naka_conf, &counters)
+        {
             warn!(
                 "Error while minting a bitcoin block and waiting for stacks-node activity: {e:?}"
             );
@@ -3038,6 +2964,7 @@ fn block_proposal_api_endpoint() {
         naka_proposed_blocks: proposals_submitted,
         ..
     } = run_loop.counters();
+    let counters = run_loop.counters();
 
     let coord_channel = run_loop.coordinator_channels();
@@ -3078,13 +3005,7 @@
 
     // Mine 3 nakamoto tenures
     for _ in 0..3 {
-        next_block_and_mine_commit(
-            &mut btc_regtest_controller,
-            60,
-            &coord_channel,
-            &commits_submitted,
-        )
-        .unwrap();
+        next_block_and_mine_commit(&mut btc_regtest_controller, 60, &conf, &counters).unwrap();
     }
 
     // TODO (hack) instantiate the sortdb in the burnchain
@@ -3416,6 +3337,7 @@ fn miner_writes_proposed_block_to_stackerdb() {
         naka_proposed_blocks: proposals_submitted,
         ..
     } = run_loop.counters();
+    let counters = run_loop.counters();
 
     let coord_channel = run_loop.coordinator_channels();
@@ -3436,13 +3358,7 @@
     wait_for_first_naka_block_commit(60, &commits_submitted);
 
     // Mine 1 nakamoto tenure
-    next_block_and_mine_commit(
-        &mut btc_regtest_controller,
-        60,
-        &coord_channel,
-        &commits_submitted,
-    )
-    .unwrap();
+    next_block_and_mine_commit(&mut btc_regtest_controller, 60, &naka_conf, &counters).unwrap();
 
     let sortdb = naka_conf.get_burnchain().open_sortition_db(true).unwrap();
 
@@ -3527,6 +3443,7 @@ fn vote_for_aggregate_key_burn_op() {
         naka_proposed_blocks: proposals_submitted,
         ..
     } = run_loop.counters();
+    let counters = run_loop.counters();
 
     let coord_channel = run_loop.coordinator_channels();
@@ -3609,13 +3526,7 @@
     );
 
     for _i in 0..(blocks_until_prepare) {
-        next_block_and_mine_commit(
-            &mut btc_regtest_controller,
-            60,
-            &coord_channel,
-            &commits_submitted,
-        )
-        .unwrap();
+        next_block_and_mine_commit(&mut btc_regtest_controller, 60, &naka_conf, &counters).unwrap();
     }
 
     let reward_cycle = reward_cycle + 1;
@@ -3665,13 +3576,7 @@
 
     // the second block should process the vote, after which the vote should be set
     for _i in 0..2 {
-        next_block_and_mine_commit(
-            &mut btc_regtest_controller,
-            60,
-            &coord_channel,
-            &commits_submitted,
-        )
-        .unwrap();
+        next_block_and_mine_commit(&mut btc_regtest_controller, 60, &naka_conf, &counters).unwrap();
     }
 
     let mut vote_for_aggregate_key_found = false;
@@ -4633,6 +4538,7 @@ fn burn_ops_integration_test() {
         naka_proposed_blocks: proposals_submitted,
         ..
     } = run_loop.counters();
+    let counters = run_loop.counters();
 
     let coord_channel = run_loop.coordinator_channels();
@@ -4684,13 +4590,7 @@
         "Pre-stx operation should submit successfully"
     );
 
-    next_block_and_mine_commit(
-        &mut btc_regtest_controller,
-        60,
-        &coord_channel,
-        &commits_submitted,
-    )
-    .unwrap();
+    next_block_and_mine_commit(&mut btc_regtest_controller, 60, &naka_conf, &counters).unwrap();
 
     let mut miner_signer_2 = Keychain::default(naka_conf.node.seed.clone()).generate_op_signer();
     info!("Submitting second pre-stx op");
@@ -4818,13 +4718,7 @@
     );
 
     for _i in 0..(blocks_until_prepare) {
-        next_block_and_mine_commit(
-            &mut btc_regtest_controller,
-            60,
-            &coord_channel,
-            &commits_submitted,
-        )
-        .unwrap();
+        next_block_and_mine_commit(&mut btc_regtest_controller, 60, &naka_conf, &counters).unwrap();
     }
 
     let reward_cycle = reward_cycle + 1;
@@ -4981,13 +4875,7 @@
     // the second block should process the ops
     // Also mine 2 interim blocks to ensure the stack-stx ops are not processed in them
     for _i in 0..2 {
-        next_block_and_mine_commit(
-            &mut btc_regtest_controller,
-            60,
-            &coord_channel,
-            &commits_submitted,
-        )
-        .unwrap();
+        next_block_and_mine_commit(&mut btc_regtest_controller, 60, &naka_conf, &counters).unwrap();
         for interim_block_ix in 0..2 {
             info!("Mining interim block {interim_block_ix}");
             let blocks_processed_before = coord_channel
@@ -6034,6 +5922,7 @@ fn nakamoto_attempt_time() {
         naka_proposed_blocks: proposals_submitted,
         ..
     } = run_loop.counters();
+    let counters = run_loop.counters();
 
     let coord_channel = run_loop.coordinator_channels();
@@ -6073,13 +5962,7 @@
 
     // Mine 3 nakamoto tenures
     for _ in 0..3 {
-        next_block_and_mine_commit(
-            &mut btc_regtest_controller,
-            60,
-            &coord_channel,
-            &commits_submitted,
-        )
-        .unwrap();
+        next_block_and_mine_commit(&mut btc_regtest_controller, 60, &naka_conf, &counters).unwrap();
     }
 
     // TODO (hack) instantiate the sortdb in the burnchain
@@ -6605,6 +6488,7 @@ fn signer_chainstate() {
         naka_proposed_blocks: proposals_submitted,
         ..
     } = run_loop.counters();
+    let counters = run_loop.counters();
 
     let coord_channel = run_loop.coordinator_channels();
@@ -6682,13 +6566,7 @@
     // hold the first and last blocks of the first tenure. we'll use this to submit reorging proposals
     let mut first_tenure_blocks: Option<Vec<NakamotoBlock>> = None;
     for i in 0..15 {
-        next_block_and_mine_commit(
-            &mut btc_regtest_controller,
-            60,
-            &coord_channel,
-            &commits_submitted,
-        )
-        .unwrap();
+        next_block_and_mine_commit(&mut btc_regtest_controller, 60, &naka_conf, &counters).unwrap();
 
         // this config disallows any reorg due to poorly timed block commits
         let proposal_conf = ProposalEvalConfig {
@@ -7199,6 +7077,7 @@ fn continue_tenure_extend() {
         naka_skip_commit_op: test_skip_commit_op,
         ..
     } = run_loop.counters();
+    let counters = run_loop.counters();
 
     let coord_channel = run_loop.coordinator_channels();
@@ -7260,13 +7139,7 @@
     wait_for_first_naka_block_commit(60, &commits_submitted);
 
     // Mine a regular nakamoto tenure
-    next_block_and_mine_commit(
-        &mut btc_regtest_controller,
-        60,
-        &coord_channel,
-        &commits_submitted,
-    )
-    .unwrap();
+    next_block_and_mine_commit(&mut btc_regtest_controller, 60, &naka_conf, &counters).unwrap();
 
     wait_for(5, || {
         let blocks_processed = coord_channel
@@ -7698,6 +7571,7 @@ fn check_block_times() {
         naka_proposed_blocks: proposals_submitted,
         ..
     } = run_loop.counters();
+    let counters = run_loop.counters();
 
     let coord_channel = run_loop.coordinator_channels();
@@ -7740,19 +7614,13 @@
     info!("Nakamoto miner started...");
     blind_signer(&naka_conf, &signers, proposals_submitted);
+    wait_for_first_naka_block_commit(60, &counters.naka_submitted_commits);
 
-    let epochs = naka_conf.burnchain.epochs.clone().unwrap();
-    let epoch_3 = &epochs[StacksEpochId::Epoch30];
-    let epoch_3_start = epoch_3.start_height;
-    let mut last_stacks_block_height = 0;
-    let mut last_tenure_height = 0;
-    next_block_and(&mut btc_regtest_controller, 60, || {
-        let info = get_chain_info_result(&naka_conf).unwrap();
-        last_stacks_block_height = info.stacks_tip_height as u128;
-        last_tenure_height = last_stacks_block_height + 1;
-        Ok(info.burn_block_height == epoch_3_start)
-    })
-    .unwrap();
+    let info = get_chain_info_result(&naka_conf).unwrap();
+    let mut last_stacks_block_height = info.stacks_tip_height as u128;
+    let mut last_tenure_height = last_stacks_block_height + 1;
+
+    next_block_and_mine_commit(&mut btc_regtest_controller, 60, &naka_conf, &counters).unwrap();
 
     let time0_value = call_read_only(
         &naka_conf,
@@ -7810,16 +7678,13 @@
         Ok(stacks_block_height > last_stacks_block_height && cur_sender_nonce == sender_nonce)
     })
     .expect("Timed out waiting for contracts to publish");
-    last_stacks_block_height = stacks_block_height;
 
     // Repeat these tests for 5 tenures
     for _ in 0..5 {
-        next_block_and(&mut btc_regtest_controller, 60, || {
-            let info = get_chain_info_result(&naka_conf).unwrap();
-            stacks_block_height = info.stacks_tip_height as u128;
-            Ok(stacks_block_height > last_stacks_block_height)
-        })
-        .unwrap();
+        next_block_and_mine_commit(&mut btc_regtest_controller, 60, &naka_conf, &counters).unwrap();
+        let info = get_chain_info_result(&naka_conf).unwrap();
+        stacks_block_height = info.stacks_tip_height as u128;
+        last_stacks_block_height = stacks_block_height;
 
         last_tenure_height += 1;
         info!("New tenure {last_tenure_height}, Stacks height: {last_stacks_block_height}");
@@ -8730,6 +8595,7 @@ fn check_block_info_rewards() {
         naka_proposed_blocks: proposals_submitted,
         ..
     } = run_loop.counters();
+    let counters = run_loop.counters();
 
     let coord_channel = run_loop.coordinator_channels();
@@ -8928,13 +8794,7 @@
     // (only 2 blocks maturation time in tests)
     info!("Mining 6 tenures to mature the block reward");
     for i in 0..6 {
-        next_block_and_mine_commit(
-            &mut btc_regtest_controller,
-            20,
-            &coord_channel,
-            &commits_submitted,
-        )
-        .unwrap();
+        next_block_and_mine_commit(&mut btc_regtest_controller, 20, &naka_conf, &counters).unwrap();
         info!("Mined a block ({i})");
     }
 
@@ -9500,10 +9360,10 @@ fn v3_signer_api_endpoint() {
     let run_loop_stopper = run_loop.get_termination_switch();
     let Counters {
         blocks_processed,
-        naka_submitted_commits: commits_submitted,
         naka_proposed_blocks: proposals_submitted,
         ..
     } = run_loop.counters();
+    let counters = run_loop.counters();
 
     let coord_channel = run_loop.coordinator_channels();
@@ -9564,13 +9424,7 @@
 
     // Mine some nakamoto tenures
     for _i in 0..naka_tenures {
-        next_block_and_mine_commit(
-            &mut btc_regtest_controller,
-            60,
-            &coord_channel,
-            &commits_submitted,
-        )
-        .unwrap();
+        next_block_and_mine_commit(&mut btc_regtest_controller, 60, &conf, &counters).unwrap();
     }
     let block_height = btc_regtest_controller.get_headers_height();
     let reward_cycle = btc_regtest_controller
@@ -9676,7 +9530,7 @@ fn v3_blockbyheight_api_endpoint() {
         naka_proposed_blocks: proposals_submitted,
         ..
     } = run_loop.counters();
-
+    let counters = run_loop.counters();
     let coord_channel = run_loop.coordinator_channels();
 
     let run_loop_thread = thread::spawn(move || run_loop.start(None, 0));
@@ -9698,13 +9552,7 @@
     wait_for_first_naka_block_commit(60, &commits_submitted);
 
     // Mine 1 nakamoto tenure
-    next_block_and_mine_commit(
-        &mut btc_regtest_controller,
-        60,
-        &coord_channel,
-        &commits_submitted,
-    )
-    .unwrap();
+    next_block_and_mine_commit(&mut btc_regtest_controller, 60, &conf, &counters).unwrap();
 
     let burnchain = conf.get_burnchain();
     let sortdb = burnchain.open_sortition_db(true).unwrap();
@@ -9799,11 +9647,10 @@ fn nakamoto_lockup_events() {
     let run_loop_stopper = run_loop.get_termination_switch();
     let Counters {
         blocks_processed,
-        naka_submitted_commits: commits_submitted,
         naka_proposed_blocks: proposals_submitted,
         ..
     } = run_loop.counters();
-
+    let counters = run_loop.counters();
     let coord_channel = run_loop.coordinator_channels();
 
     let run_loop_thread = thread::spawn(move || run_loop.start(None, 0));
@@ -9834,13 +9681,7 @@
 
     info!("------------------------- Setup finished, run test -------------------------");
 
-    next_block_and_mine_commit(
-        &mut btc_regtest_controller,
-        60,
-        &coord_channel,
-        &commits_submitted,
-    )
-    .unwrap();
+    next_block_and_mine_commit(&mut btc_regtest_controller, 60, &conf, &counters).unwrap();
 
     let http_origin = format!("http://{}", &conf.node.rpc_bind);
 
@@ -9984,6 +9825,7 @@ fn skip_mining_long_tx() {
         naka_mined_blocks: mined_naka_blocks,
         ..
     } = run_loop.counters();
+    let counters = run_loop.counters();
 
     let coord_channel = run_loop.coordinator_channels();
@@ -10022,13 +9864,7 @@
     // Mine a few nakamoto tenures with some interim blocks in them
     for i in 0..5 {
         let mined_before = mined_naka_blocks.load(Ordering::SeqCst);
-        next_block_and_mine_commit(
-            &mut btc_regtest_controller,
-            60,
-            &coord_channel,
-            &commits_submitted,
-        )
-        .unwrap();
+        next_block_and_mine_commit(&mut btc_regtest_controller, 60, &naka_conf, &counters).unwrap();
 
         if i == 0 {
             // we trigger the nakamoto miner to evaluate the long running transaction,
@@ -10136,17 +9972,10 @@ fn test_shadow_recovery() {
     let naka_conf = signer_test.running_nodes.conf.clone();
     let btc_regtest_controller = &mut signer_test.running_nodes.btc_regtest_controller;
-    let coord_channel = signer_test.running_nodes.coord_channel.clone();
-    let commits_submitted = signer_test.running_nodes.commits_submitted.clone();
+    let counters = signer_test.running_nodes.counters.clone();
 
     // make another tenure
-    next_block_and_mine_commit(
-        btc_regtest_controller,
-        60,
-        &coord_channel,
-        &commits_submitted,
-    )
-    .unwrap();
+    next_block_and_mine_commit(btc_regtest_controller, 60, &naka_conf, &counters).unwrap();
 
     let block_height = btc_regtest_controller.get_headers_height();
     let reward_cycle = btc_regtest_controller
@@ -10216,18 +10045,11 @@
     // revive ATC-C by waiting for commits
     for _i in 0..4 {
-        btc_regtest_controller.bootstrap_chain(1);
-        sleep_ms(30_000);
+        next_block_and_commits_only(btc_regtest_controller, 60, &naka_conf, &counters).unwrap();
     }
 
     // make another tenure
-    next_block_and_mine_commit(
-        btc_regtest_controller,
-        60,
-        &coord_channel,
-        &commits_submitted,
-    )
-    .unwrap();
+    next_block_and_mine_commit(btc_regtest_controller, 60, &naka_conf, &counters).unwrap();
 
     // all shadow blocks are present and processed
     let mut shadow_ids = HashSet::new();
@@ -10950,8 +10772,7 @@ fn test_tenure_extend_from_flashblocks() {
     let http_origin = format!("http://{}", &naka_conf.node.rpc_bind);
     let btc_regtest_controller = &mut signer_test.running_nodes.btc_regtest_controller;
     let coord_channel = signer_test.running_nodes.coord_channel.clone();
-    let commits_submitted = signer_test.running_nodes.commits_submitted.clone();
-    let sortitions_processed = signer_test.running_nodes.sortitions_processed.clone();
+    let counters = signer_test.running_nodes.counters.clone();
     let nakamoto_test_skip_commit_op = signer_test
         .running_nodes
         .nakamoto_test_skip_commit_op
@@ -10971,13 +10792,7 @@
         .unwrap();
 
     for _ in 0..3 {
-        next_block_and_mine_commit(
-            btc_regtest_controller,
-            60,
-            &coord_channel,
-            &commits_submitted,
-        )
-        .unwrap();
+        next_block_and_mine_commit(btc_regtest_controller, 60, &naka_conf, &counters).unwrap();
     }
 
     let burn_view_contract = r#"
@@ -11009,69 +10824,33 @@
     );
     submit_tx(&http_origin, &contract_tx);
 
-    let blocks_processed_before = coord_channel
-        .lock()
-        .expect("Mutex poisoned")
-        .get_stacks_blocks_processed();
-
     wait_for(120, || {
-        let blocks_processed = coord_channel
-            .lock()
-            .expect("Mutex poisoned")
-            .get_stacks_blocks_processed();
-        Ok(blocks_processed > blocks_processed_before)
+        let sender_nonce = get_account(&naka_conf.node.data_url, &deployer_addr).nonce;
+        Ok(sender_nonce > 0)
     })
     .expect("Timed out waiting for interim blocks to be mined");
 
-    next_block_and_mine_commit(
-        btc_regtest_controller,
-        60,
-        &coord_channel,
-        &commits_submitted,
-    )
-    .unwrap();
+    next_block_and_mine_commit(btc_regtest_controller, 60, &naka_conf, &counters).unwrap();
 
     // stall miner and relayer
 
-    // make tenure but don't wait for a stacks block
-    next_block_and_commits_only(
-        btc_regtest_controller,
-        60,
-        &coord_channel,
-        &commits_submitted,
-    )
-    .unwrap();
+    // make tenure
+    next_block_and_mine_commit(btc_regtest_controller, 60, &naka_conf, &counters).unwrap();
 
-    // prevent the mienr from sending another block-commit
+    // prevent the miner from sending another block-commit
     nakamoto_test_skip_commit_op.set(true);
 
-    // make sure we get a block-found tenure change
-    let blocks_processed_before = coord_channel
-        .lock()
-        .expect("Mutex poisoned")
-        .get_stacks_blocks_processed();
-
-    // make sure the relayer processes both sortitions
-    let sortitions_processed_before = sortitions_processed.load(Ordering::SeqCst);
+    let info_before = get_chain_info(&naka_conf);
 
     // mine another Bitcoin block right away, since it will contain a block-commit
     btc_regtest_controller.bootstrap_chain(1);
 
     wait_for(60, || {
-        sleep_ms(100);
-        let sortitions_cnt = sortitions_processed.load(Ordering::SeqCst);
-        Ok(sortitions_cnt > sortitions_processed_before)
-    })
-    .unwrap();
-
-    wait_for(120, || {
-        let blocks_processed = coord_channel
-            .lock()
-            .expect("Mutex poisoned")
-            .get_stacks_blocks_processed();
-        Ok(blocks_processed > blocks_processed_before)
+        let info = get_chain_info(&naka_conf);
+        Ok(info.burn_block_height > info_before.burn_block_height
+            && info.stacks_tip_height > info_before.stacks_tip_height)
     })
-    .expect("Timed out waiting for interim blocks to be mined");
+    .unwrap();
 
     let (canonical_stacks_tip_ch, _) =
         SortitionDB::get_canonical_stacks_chain_tip_hash(sortdb.conn()).unwrap();
@@ -11098,11 +10877,9 @@
     // Given the above, this will be an `Extend` tenure.
     TEST_MINER_THREAD_STALL.set(false);
 
-    let sortitions_processed_before = sortitions_processed.load(Ordering::SeqCst);
     wait_for(60, || {
-        sleep_ms(100);
-        let sortitions_cnt = sortitions_processed.load(Ordering::SeqCst);
-        Ok(sortitions_cnt > sortitions_processed_before)
+        let cur_sort_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap();
+        Ok(cur_sort_tip.block_height > sort_tip.block_height)
     })
     .unwrap();
@@ -11180,7 +10957,6 @@
     // wait for the miner directive to be processed
     wait_for(60, || {
-        sleep_ms(30_000);
         let directives_cnt = nakamoto_miner_directives.load(Ordering::SeqCst);
         Ok(directives_cnt > miner_directives_before)
     })
@@ -11188,7 +10964,7 @@
 
     // wait for all of the aforementioned transactions to get mined
     wait_for(120, || {
-        // fill mempool with transactions that depend on the burn view
+        // check account nonces from the sent transactions
         for (sender_sk, account_before) in account_keys.iter().zip(accounts_before.iter()) {
             let sender_addr = tests::to_addr(sender_sk);
             let account = loop {
@@ -11245,28 +11021,7 @@
     }
 
     // mine one additional tenure, to verify that we're on track
-    let commits_before = commits_submitted.load(Ordering::SeqCst);
-    let node_info_before = get_chain_info_opt(&naka_conf).unwrap();
-
-    btc_regtest_controller.bootstrap_chain(1);
-
-    wait_for(20, || {
-        Ok(commits_submitted.load(Ordering::SeqCst) > commits_before)
-    })
-    .unwrap();
-
-    // there was a sortition winner
-    let sort_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn()).unwrap();
-    assert!(sort_tip.sortition);
-
-    wait_for(20, || {
-        let node_info = get_chain_info_opt(&naka_conf).unwrap();
-        Ok(
-            node_info.burn_block_height > node_info_before.burn_block_height
-                && node_info.stacks_tip_height > node_info_before.stacks_tip_height,
-        )
-    })
-    .unwrap();
+    next_block_and_mine_commit(btc_regtest_controller, 60, &naka_conf, &counters).unwrap();
 
     // boot a follower. it should reach the chain tip
     info!("----- BEGIN FOLLOWR BOOTUP ------");
@@ -11321,9 +11076,8 @@
     debug!("Booted follower-thread");
 
-    let miner_info = get_chain_info_result(&naka_conf).unwrap();
-
     wait_for(300, || {
+        let miner_info = get_chain_info_result(&naka_conf).unwrap();
         let Ok(info) = get_chain_info_result(&follower_conf) else {
             sleep_ms(1000);
             return Ok(false);
diff --git a/testnet/stacks-node/src/tests/signer/mod.rs b/testnet/stacks-node/src/tests/signer/mod.rs
index 1bf57444ed..6b355fe5aa 100644
--- a/testnet/stacks-node/src/tests/signer/mod.rs
+++ b/testnet/stacks-node/src/tests/signer/mod.rs
@@ -336,15 +336,14 @@ impl<S: Signer<T> + Send + 'static, T: SignerEventTrait + 'static> SignerTest<S, T>
     pub fn mine_block_wait_on_processing(
         &mut self,
-        coord_channels: &[&Arc<Mutex<CoordinatorChannels>>],
-        commits_submitted: &[&Arc<AtomicU64>],
+        node_confs: &[&NeonConfig],
+        node_counters: &[&Counters],
         timeout: Duration,
     ) {
         let blocks_len = test_observer::get_blocks().len();
@@ -370,8 +369,8 @@ impl<S: Signer<T> + Send + 'static, T: SignerEventTrait + 'static> SignerTest<S, T>
+    let mut signer_test: SignerTest<SpawnedSigner> = SignerTest::new_with_config_modifications(
+        num_signers,
+        vec![(sender_addr, send_amt + send_fee)],
+        |config| {
+            config.block_proposal_validation_timeout = timeout;
+        },
+        |config| {
+            config.miner.block_rejection_timeout_steps.clear();
+            config
+                .miner
+                .block_rejection_timeout_steps
+                .insert(0, Duration::from_secs(123));
+            config
+                .miner
+                .block_rejection_timeout_steps
+                .insert(10, Duration::from_secs(20));
+            config
+                .miner
+                .block_rejection_timeout_steps
+                .insert(15, Duration::from_secs(10));
+            config
+                .miner
+                .block_rejection_timeout_steps
+                .insert(20, Duration::from_secs(99));
+        },
+        None,
+        None,
+    );
+
+    let all_signers: Vec<_> = signer_test
+        .signer_stacks_private_keys
+        .iter()
+        .map(StacksPublicKey::from_private)
+        .collect();
+
+    signer_test.boot_to_epoch_3();
+
+    // note we just use mined nakamoto_blocks as the second block is not going to be confirmed
+
+    let mut test_rejections = |signer_split_index: usize, expected_timeout: u64| {
+        let blocks_before = test_observer::get_mined_nakamoto_blocks().len();
+        let (ignore_signers, reject_signers) = all_signers.split_at(signer_split_index);
+
+        info!("------------------------- Check Rejections-based timeout with {} rejections -------------------------", reject_signers.len());
+
+        TEST_REJECT_ALL_BLOCK_PROPOSAL.set(reject_signers.to_vec());
+        TEST_IGNORE_ALL_BLOCK_PROPOSALS.set(ignore_signers.to_vec());
+
+        next_block_and(
+            &mut signer_test.running_nodes.btc_regtest_controller,
+            30,
+            || Ok(test_observer::get_mined_nakamoto_blocks().len() > blocks_before),
+        )
+        .unwrap();
+
+        signer_test
+            .wait_for_block_rejections(timeout.as_secs(), &reject_signers)
+            .unwrap();
+
+        wait_for(60, || {
+            Ok(signer_test
+                .running_nodes
+                .counters
+                .naka_miner_current_rejections
+                .get()
+                >= reject_signers.len() as u64)
+        })
+        .unwrap();
+        assert_eq!(
+            signer_test
+                .running_nodes
+                .counters
+                .naka_miner_current_rejections_timeout_secs
+                .get(),
+            expected_timeout
+        );
+    };
+
+    test_rejections(19, 123);
+    test_rejections(18, 20);
+    test_rejections(17, 10);
+    test_rejections(16, 99);
+
+    // reset reject/ignore
+    TEST_REJECT_ALL_BLOCK_PROPOSAL.set(vec![]);
+    TEST_IGNORE_ALL_BLOCK_PROPOSALS.set(vec![]);
+
+    info!("------------------------- Shutdown -------------------------");
+    signer_test.shutdown();
+}
+
 /// Test scenario:
 ///
 /// - when a signer submits a block validation request and
@@ -11655,6 +11767,7 @@ fn multiple_miners_empty_sortition() {
         naka_submitted_commits: rl2_commits,
         ..
     } = run_loop_2.counters();
+    let rl2_counters = run_loop_2.counters();
     let run_loop_2_thread = thread::Builder::new()
         .name("run_loop_2".into())
         .spawn(move || run_loop_2.start(None, 0))
         .unwrap();
@@ -11691,16 +11804,16 @@
     );
     submit_tx(&conf.node.data_url, &contract_tx);
 
-    let rl1_coord_channels = signer_test.running_nodes.coord_channel.clone();
     let rl1_commits = signer_test.running_nodes.commits_submitted.clone();
+    let rl1_counters = signer_test.running_nodes.counters.clone();
 
     let last_sender_nonce = loop {
         // Mine 1 nakamoto tenures
         info!("Mining tenure...");
         signer_test.mine_block_wait_on_processing(
-            &[&rl1_coord_channels, &rl2_coord_channels],
-            &[&rl1_commits, &rl2_commits],
+            &[&conf, &conf_node_2],
+            &[&rl1_counters, &rl2_counters],
             Duration::from_secs(30),
         );
 
@@ -11734,16 +11847,29 @@
     let last_active_sortition = get_sortition_info(&conf);
     assert!(last_active_sortition.was_sortition);
 
+    // check if we're about to cross a reward cycle boundary -- if so, we can't
+    // perform this test, because we can't tenure extend across the boundary
+    let pox_info = get_pox_info(&conf.node.data_url).unwrap();
+    let blocks_until_next_cycle = pox_info.next_cycle.blocks_until_reward_phase;
+    if blocks_until_next_cycle == 1 {
+        info!("We're about to cross a reward cycle boundary, cannot perform a tenure extend here!");
+        continue;
+    }
+
     // lets mine a btc flash block
     let rl2_commits_before = rl2_commits.load(Ordering::SeqCst);
     let rl1_commits_before = rl1_commits.load(Ordering::SeqCst);
 
+    let info_before = get_chain_info(&conf);
+
     signer_test
         .running_nodes
         .btc_regtest_controller
         .build_next_block(2);
 
     wait_for(60, || {
-        Ok(rl2_commits.load(Ordering::SeqCst) > rl2_commits_before
+        let info = get_chain_info(&conf);
+        Ok(info.burn_block_height >= 2 + info_before.burn_block_height
+            && rl2_commits.load(Ordering::SeqCst) > rl2_commits_before
             && rl1_commits.load(Ordering::SeqCst) > rl1_commits_before)
     })
     .unwrap();
@@ -11861,16 +11987,17 @@
     );
     submit_tx(&conf.node.data_url, &contract_tx);
 
-    let rl1_coord_channels = signer_test.running_nodes.coord_channel.clone();
     let rl1_commits = signer_test.running_nodes.commits_submitted.clone();
+    let rl1_counters = signer_test.running_nodes.counters.clone();
+    let rl1_conf = signer_test.running_nodes.conf.clone();
 
     for _i in 0..3 {
         // Mine 1 nakamoto tenures
         info!("Mining tenure...");
         signer_test.mine_block_wait_on_processing(
-            &[&rl1_coord_channels],
-            &[&rl1_commits],
+            &[&rl1_conf],
+            &[&rl1_counters],
            Duration::from_secs(30),
         );
 
@@ -11906,13 +12033,16 @@
     // lets mine a btc flash block
     let rl1_commits_before = rl1_commits.load(Ordering::SeqCst);
+    let info_before = get_chain_info(&conf);
 
     signer_test
         .running_nodes
         .btc_regtest_controller
         .build_next_block(2);
 
     wait_for(60, || {
-        Ok(rl1_commits.load(Ordering::SeqCst) > rl1_commits_before)
+        let info = get_chain_info(&conf);
+        Ok(info.burn_block_height >= 2 + info_before.burn_block_height
+            && rl1_commits.load(Ordering::SeqCst) > rl1_commits_before)
     })
     .unwrap();
@@ -12596,3 +12726,112 @@ fn allow_reorg_within_first_proposal_burn_block_timing_secs() {
     run_loop_2_thread.join().unwrap();
     signer_test.shutdown();
 }
+
+#[test]
+#[ignore]
+/// This test verifies that a miner will produce a TenureExtend transaction
+/// only after it has reached the cost threshold.
+fn tenure_extend_cost_threshold() {
+    if env::var("BITCOIND_TEST") != Ok("1".into()) {
+        return;
+    }
+
+    let deployer_sk = Secp256k1PrivateKey::random();
+    let deployer_addr = tests::to_addr(&deployer_sk);
+    let num_txs = 10;
+    let tx_fee = 10000;
+    let deploy_fee = 190200;
+
+    info!("------------------------- Test Setup -------------------------");
+    let num_signers = 5;
+    let idle_timeout = Duration::from_secs(10);
+    let mut signer_test: SignerTest<SpawnedSigner> = SignerTest::new_with_config_modifications(
+        num_signers,
+        vec![(deployer_addr, deploy_fee + tx_fee * num_txs)],
+        |config| {
+            config.tenure_idle_timeout = idle_timeout;
+        },
+        |config| {
+            config.miner.tenure_extend_cost_threshold = 5;
+        },
+        None,
+        None,
+    );
+    let naka_conf = signer_test.running_nodes.conf.clone();
+    let http_origin = format!("http://{}", &naka_conf.node.rpc_bind);
+
+    signer_test.boot_to_epoch_3();
+
+    info!("---- Nakamoto booted, starting test ----");
+    signer_test.mine_nakamoto_block(Duration::from_secs(30), true);
+
+    info!("---- Waiting for a tenure extend ----");
+
+    // Now, wait for a block with a tenure extend
+    wait_for(idle_timeout.as_secs() + 10, || {
+        Ok(last_block_contains_tenure_change_tx(
+            TenureChangeCause::Extended,
+        ))
+    })
+    .expect_err("Received a tenure extend before cost threshold was reached");
+
+    // Now deploy a contract and call it in order to cross the threshold.
+    let contract_src = format!(
+        r#"
+(define-data-var my-var uint u0)
+(define-public (f) (begin {} (ok 1))) (begin (f))
+        "#,
+        ["(var-get my-var)"; 250].join(" ")
+    );
+
+    // First, lets deploy the contract
+    let mut nonce = 0;
+    let contract_tx = make_contract_publish(
+        &deployer_sk,
+        nonce,
+        deploy_fee,
+        naka_conf.burnchain.chain_id,
+        "small-contract",
+        &contract_src,
+    );
+    submit_tx(&http_origin, &contract_tx);
+    nonce += 1;
+
+    // Wait for the contract to be included in a block
+    wait_for(60, || {
+        let account = get_account(&http_origin, &deployer_addr);
+        Ok(account.nonce == nonce)
+    })
+    .expect("Contract not included in block");
+
+    // Ensure the tenure was not extended in that block
+    assert!(!last_block_contains_tenure_change_tx(
+        TenureChangeCause::Extended
+    ));
+
+    // Now, lets call the contract a bunch of times to increase the tenure cost
+    for _ in 0..num_txs {
+        let call_tx = make_contract_call(
+            &deployer_sk,
+            nonce,
+            tx_fee,
+            naka_conf.burnchain.chain_id,
+            &deployer_addr,
+            "small-contract",
+            "f",
+            &[],
+        );
+        submit_tx(&http_origin, &call_tx);
+        nonce += 1;
+    }
+
+    // Now, wait for a block with a tenure extend
+    wait_for(idle_timeout.as_secs() + 10, || {
+        Ok(last_block_contains_tenure_change_tx(
+            TenureChangeCause::Extended,
+        ))
+    })
+    .expect("Timed out waiting for a block with a tenure extend");
+
+    signer_test.shutdown();
+}
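Taken together with the miner-side change, this test pins down the intended gating rule: a time-based tenure extend should fire only after the signers' idle timeout has passed and the tenure has consumed at least `tenure_extend_cost_threshold` percent of its execution budget. A minimal sketch of that predicate, with `should_time_based_tenure_extend` as a hypothetical helper (the real miner tracks multi-dimensional execution costs rather than a single percentage):

```rust
/// Illustrative sketch only, not the miner's actual API: with
/// `tenure_extend_cost_threshold = 5`, at least 5% of the tenure budget
/// must be spent before an idle-timeout tenure extend is attempted.
fn should_time_based_tenure_extend(
    percent_budget_spent: u64,  // percentage of the tenure budget consumed so far
    cost_threshold: u64,        // miner.tenure_extend_cost_threshold (a percentage)
    idle_timeout_elapsed: bool, // the signers' tenure_idle_timeout has passed
) -> bool {
    idle_timeout_elapsed && percent_budget_spent >= cost_threshold
}

fn main() {
    // An essentially empty tenure must not extend, even after the idle timeout...
    assert!(!should_time_based_tenure_extend(0, 5, true));
    // ...while a tenure that has burned through >= 5% of its budget may.
    assert!(should_time_based_tenure_extend(7, 5, true));
    // And the idle timeout remains a hard prerequisite.
    assert!(!should_time_based_tenure_extend(50, 5, false));
}
```

This is why the test first waits out the idle timeout and asserts that no extend appears, then deploys and repeatedly calls a contract to burn budget before expecting the `TenureChangeCause::Extended` transaction.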