diff --git a/stackslib/src/net/tests/inv/nakamoto.rs b/stackslib/src/net/tests/inv/nakamoto.rs
index 3a29d453ae..cb09236ccb 100644
--- a/stackslib/src/net/tests/inv/nakamoto.rs
+++ b/stackslib/src/net/tests/inv/nakamoto.rs
@@ -901,18 +901,10 @@ fn test_nakamoto_inv_sync_state_machine() {
         let _ = peer.step_with_ibd(false);
         let _ = other_peer.step_with_ibd(false);
 
-        let event_ids: Vec<usize> = peer
-            .network
-            .iter_peer_event_ids()
-            .map(|e_id| *e_id)
-            .collect();
-        let other_event_ids: Vec<usize> = other_peer
-            .network
-            .iter_peer_event_ids()
-            .map(|e_id| *e_id)
-            .collect();
+        let event_ids = peer.network.iter_peer_event_ids();
+        let other_event_ids = other_peer.network.iter_peer_event_ids();
 
-        if !event_ids.is_empty() && !other_event_ids.is_empty() {
+        if !(event_ids.count() == 0) && !(other_event_ids.count() == 0) {
             break;
         }
     }
@@ -937,8 +929,8 @@ fn test_nakamoto_inv_sync_state_machine() {
     let mut last_learned_rc = 0;
     loop {
         let _ = other_peer.step_with_ibd(false);
-        let ev_ids: Vec<_> = other_peer.network.iter_peer_event_ids().collect();
-        if ev_ids.is_empty() {
+        let ev_ids = other_peer.network.iter_peer_event_ids();
+        if ev_ids.count() == 0 {
             // disconnected
             panic!("Disconnected");
         }
@@ -1032,18 +1024,10 @@ fn test_nakamoto_inv_sync_across_epoch_change() {
         let _ = peer.step_with_ibd(false);
         let _ = other_peer.step_with_ibd(false);
 
-        let event_ids: Vec<usize> = peer
-            .network
-            .iter_peer_event_ids()
-            .map(|e_id| *e_id)
-            .collect();
-        let other_event_ids: Vec<usize> = other_peer
-            .network
-            .iter_peer_event_ids()
-            .map(|e_id| *e_id)
-            .collect();
+        let event_ids = peer.network.iter_peer_event_ids();
+        let other_event_ids = other_peer.network.iter_peer_event_ids();
 
-        if !event_ids.is_empty() && !other_event_ids.is_empty() {
+        if !(event_ids.count() == 0) && !(other_event_ids.count() == 0) {
             break;
         }
     }
diff --git a/stackslib/src/net/tests/mempool/mod.rs b/stackslib/src/net/tests/mempool/mod.rs
index e635dd8006..e1430454e8 100644
--- a/stackslib/src/net/tests/mempool/mod.rs
+++ b/stackslib/src/net/tests/mempool/mod.rs
@@ -1133,18 +1133,10 @@ fn test_mempool_sync_2_peers_nakamoto_paginated() {
         let _ = peer_1.step_with_ibd(false);
         let _ = peer_2.step_with_ibd(false);
 
-        let event_ids: Vec<usize> = peer_1
-            .network
-            .iter_peer_event_ids()
-            .map(|e_id| *e_id)
-            .collect();
-        let other_event_ids: Vec<usize> = peer_2
-            .network
-            .iter_peer_event_ids()
-            .map(|e_id| *e_id)
-            .collect();
+        let event_ids = peer_1.network.iter_peer_event_ids();
+        let other_event_ids = peer_2.network.iter_peer_event_ids();
 
-        if !event_ids.is_empty() && !other_event_ids.is_empty() {
+        if !(event_ids.count() == 0) && !(other_event_ids.count() == 0) {
            break;
        }
    }
diff --git a/testnet/stacks-node/src/neon_node.rs b/testnet/stacks-node/src/neon_node.rs
index 070837997d..6b1b8d2426 100644
--- a/testnet/stacks-node/src/neon_node.rs
+++ b/testnet/stacks-node/src/neon_node.rs
@@ -1227,16 +1227,15 @@ impl BlockMinerThread {
 
         // process earlier tips, back to max_depth
         for cur_height in end_height.saturating_sub(max_depth)..end_height {
-            let stacks_tips: Vec<_> = chain_state
+            let stacks_tips = chain_state
                 .get_stacks_chain_tips_at_height(cur_height)
                 .expect("FATAL: could not query chain tips at height")
                 .into_iter()
                 .filter(|candidate| {
                     Self::is_on_canonical_burnchain_fork(candidate, &sortdb_tip_handle)
-                })
-                .collect();
+                });
 
-            for tip in stacks_tips.into_iter() {
+            for tip in stacks_tips {
                 let index_block_hash =
                     StacksBlockId::new(&tip.consensus_hash, &tip.anchored_block_hash);
 
diff --git a/testnet/stacks-node/src/tests/integrations.rs b/testnet/stacks-node/src/tests/integrations.rs
index 7f893835d1..28522e914e 100644
--- a/testnet/stacks-node/src/tests/integrations.rs
+++ b/testnet/stacks-node/src/tests/integrations.rs
@@ -955,17 +955,15 @@ fn integration_test_get_info() {
             .as_array()
             .expect("Fees should be array");
 
-        let estimated_fee_rates: Vec<_> = estimations
+        let estimated_fee_rates = estimations
             .iter()
-            .map(|x| x.get("fee_rate").expect("Should have fee_rate field"))
-            .collect();
-        let estimated_fees: Vec<_> = estimations
+            .map(|x| x.get("fee_rate").expect("Should have fee_rate field"));
+        let estimated_fees = estimations
             .iter()
-            .map(|x| x.get("fee").expect("Should have fee field"))
-            .collect();
+            .map(|x| x.get("fee").expect("Should have fee field"));
 
-        assert_eq!(estimated_fee_rates.len(), 3, "Fee rates should be length 3 array");
-        assert_eq!(estimated_fees.len(), 3, "Fees should be length 3 array");
+        assert_eq!(estimated_fee_rates.count(), 3, "Fee rates should be length 3 array");
+        assert_eq!(estimated_fees.count(), 3, "Fees should be length 3 array");
 
         let tx_payload = TransactionPayload::from(TransactionContractCall {
             address: contract_addr,
@@ -1006,16 +1004,15 @@ fn integration_test_get_info() {
             .as_array()
             .expect("Fees should be array");
 
-        let estimated_fee_rates: Vec<_> = estimations
+        let estimated_fee_rates = estimations
             .iter()
-            .map(|x| x.get("fee_rate").expect("Should have fee_rate field"))
-            .collect();
+            .map(|x| x.get("fee_rate").expect("Should have fee_rate field"));
         let estimated_fees: Vec<_> = estimations
             .iter()
             .map(|x| x.get("fee").expect("Should have fee field"))
             .collect();
 
-        assert_eq!(estimated_fee_rates.len(), 3, "Fee rates should be length 3 array");
+        assert_eq!(estimated_fee_rates.count(), 3, "Fee rates should be length 3 array");
         assert_eq!(estimated_fees.len(), 3, "Fees should be length 3 array");
 
         let tx_payload = TransactionPayload::from(TransactionContractCall {
diff --git a/testnet/stacks-node/src/tests/mod.rs b/testnet/stacks-node/src/tests/mod.rs
index 6f02ecf138..a1eef92900 100644
--- a/testnet/stacks-node/src/tests/mod.rs
+++ b/testnet/stacks-node/src/tests/mod.rs
@@ -759,12 +759,8 @@ fn should_succeed_mining_valid_txs() {
                 ));
 
                 // 0 event should have been produced
-                let events: Vec<StacksTransactionEvent> = chain_tip
-                    .receipts
-                    .iter()
-                    .flat_map(|a| a.events.clone())
-                    .collect();
-                assert!(events.is_empty());
+                let events = chain_tip.receipts.iter().flat_map(|a| a.events.clone());
+                assert!(events.count() == 0);
             }
             2 => {
                 // Inspecting the chain at round 2.
@@ -791,12 +787,8 @@ fn should_succeed_mining_valid_txs() {
                 ));
 
                 // 2 lockup events should have been produced
-                let events: Vec<StacksTransactionEvent> = chain_tip
-                    .receipts
-                    .iter()
-                    .flat_map(|a| a.events.clone())
-                    .collect();
-                assert_eq!(events.len(), 2);
+                let events = chain_tip.receipts.iter().flat_map(|a| a.events.clone());
+                assert_eq!(events.count(), 2);
             }
             3 => {
                 // Inspecting the chain at round 3.
diff --git a/testnet/stacks-node/src/tests/signer/v0.rs b/testnet/stacks-node/src/tests/signer/v0.rs
index c0956d0113..8d7953aa72 100644
--- a/testnet/stacks-node/src/tests/signer/v0.rs
+++ b/testnet/stacks-node/src/tests/signer/v0.rs
@@ -2798,9 +2798,8 @@ fn tenure_extend_succeeds_after_rejected_attempt() {
                 }
             }
             None
-        })
-        .collect::<Vec<_>>();
-        Ok(signatures.len() >= num_signers * 7 / 10)
+        });
+        Ok(signatures.count() >= num_signers * 7 / 10)
     })
     .expect("Test timed out while waiting for a rejected tenure extend");
 
@@ -2867,12 +2866,8 @@ fn stx_transfers_dont_effect_idle_timeout() {
     let reward_cycle = signer_test.get_current_reward_cycle();
 
-    let signer_slot_ids: Vec<_> = signer_test
-        .get_signer_indices(reward_cycle)
-        .iter()
-        .map(|id| id.0)
-        .collect();
-    assert_eq!(signer_slot_ids.len(), num_signers);
+    let signer_slot_ids = signer_test.get_signer_indices(reward_cycle).into_iter();
+    assert_eq!(signer_slot_ids.count(), num_signers);
 
     let get_last_block_hash = || {
         let blocks = test_observer::get_blocks();
@@ -3748,13 +3743,9 @@ fn mock_sign_epoch_25() {
 
     // Mine until epoch 3.0 and ensure that no more mock signatures are received
     let reward_cycle = signer_test.get_current_reward_cycle();
-    let signer_slot_ids: Vec<_> = signer_test
-        .get_signer_indices(reward_cycle)
-        .iter()
-        .map(|id| id.0)
-        .collect();
+    let signer_slot_ids = signer_test.get_signer_indices(reward_cycle).into_iter();
     let signer_public_keys = signer_test.get_signer_public_keys(reward_cycle);
-    assert_eq!(signer_slot_ids.len(), num_signers);
+    assert_eq!(signer_slot_ids.count(), num_signers);
 
     let miners_stackerdb_contract = boot_code_id(MINERS_NAME, false);
 
@@ -3956,13 +3947,9 @@ fn multiple_miners_mock_sign_epoch_25() {
 
     // Mine until epoch 3.0 and ensure that no more mock signatures are received
     let reward_cycle = signer_test.get_current_reward_cycle();
-    let signer_slot_ids: Vec<_> = signer_test
-        .get_signer_indices(reward_cycle)
-        .iter()
-        .map(|id| id.0)
-        .collect();
+    let signer_slot_ids = signer_test.get_signer_indices(reward_cycle).into_iter();
     let signer_public_keys = signer_test.get_signer_public_keys(reward_cycle);
-    assert_eq!(signer_slot_ids.len(), num_signers);
+    assert_eq!(signer_slot_ids.count(), num_signers);
 
     let miners_stackerdb_contract = boot_code_id(MINERS_NAME, false);
 
@@ -5827,9 +5814,8 @@ fn reorg_locally_accepted_blocks_across_tenures_succeeds() {
                 });
             }
             None
-        })
-        .collect::<Vec<_>>();
-        Ok(accepted_signers.len() + ignoring_signers.len() == num_signers)
+        });
+        Ok(accepted_signers.count() + ignoring_signers.len() == num_signers)
     })
     .expect("FAIL: Timed out waiting for block proposal acceptance");
 
@@ -6045,9 +6031,8 @@ fn reorg_locally_accepted_blocks_across_tenures_fails() {
                 });
             }
             None
-        })
-        .collect::<Vec<_>>();
-        Ok(accepted_signers.len() + ignoring_signers.len() == num_signers)
+        });
+        Ok(accepted_signers.count() + ignoring_signers.len() == num_signers)
     })
     .expect("FAIL: Timed out waiting for block proposal acceptance");
 
@@ -6095,9 +6080,8 @@ fn reorg_locally_accepted_blocks_across_tenures_fails() {
                     }),
                     _ => None,
                 }
-            })
-            .collect::<Vec<_>>();
-            Ok(rejected_signers.len() + ignoring_signers.len() == num_signers)
+            });
+            Ok(rejected_signers.count() + ignoring_signers.len() == num_signers)
         },
     )
     .expect("FAIL: Timed out waiting for block proposal rejections");
 
@@ -6291,9 +6275,8 @@ fn miner_recovers_when_broadcast_block_delay_across_tenures_occurs() {
                 }
             }
             None
-        })
-        .collect::<Vec<_>>();
-        Ok(signatures.len() >= num_signers * 7 / 10)
+        });
+        Ok(signatures.count() >= num_signers * 7 / 10)
     })
     .expect("Test timed out while waiting for signers signatures for first block proposal");
     let block = block.unwrap();
 
@@ -6381,9 +6364,8 @@ fn miner_recovers_when_broadcast_block_delay_across_tenures_occurs() {
                 }
                 _ => None,
             }
-        })
-        .collect::<Vec<_>>();
-        Ok(block_rejections.len() >= num_signers * 7 / 10)
+        });
+        Ok(block_rejections.count() >= num_signers * 7 / 10)
     })
     .expect("FAIL: Timed out waiting for block proposal rejections");
 
@@ -6752,7 +6734,7 @@ fn continue_after_fast_block_no_sortition() {
     wait_for(30, || {
         std::thread::sleep(Duration::from_secs(1));
         let chunks = test_observer::get_stackerdb_chunks();
-        let rejections: Vec<_> = chunks
+        let rejections = chunks
            .into_iter()
            .flat_map(|chunk| chunk.modified_slots)
            .filter(|chunk| {
@@ -6764,9 +6746,8 @@ fn continue_after_fast_block_no_sortition() {
                     message,
                     SignerMessage::BlockResponse(BlockResponse::Rejected(_))
                 )
-            })
-            .collect();
-        Ok(rejections.len() >= min_rejections)
+            });
+        Ok(rejections.count() >= min_rejections)
     })
     .expect("Timed out waiting for block rejections");
 
@@ -9607,7 +9588,7 @@ fn block_proposal_max_age_rejections() {
     info!("------------------------- Test Block Proposal Rejected -------------------------");
     // Verify the signers rejected only the SECOND block proposal. The first was not even processed.
     wait_for(30, || {
-        let rejections: Vec<_> = test_observer::get_stackerdb_chunks()
+        let rejections = test_observer::get_stackerdb_chunks()
             .into_iter()
             .flat_map(|chunk| chunk.modified_slots)
             .map(|chunk| {
@@ -9639,9 +9620,8 @@ fn block_proposal_max_age_rejections() {
                 }
                 _ => None,
             }
-        })
-        .collect();
-        Ok(rejections.len() > num_signers * 7 / 10)
+        });
+        Ok(rejections.count() > num_signers * 7 / 10)
     })
     .expect("Timed out waiting for block rejections");
 
@@ -11095,9 +11075,8 @@ fn injected_signatures_are_ignored_across_boundaries() {
                 });
             }
             None
-        })
-        .collect::<Vec<_>>();
-        Ok(accepted_signers.len() + ignoring_signers.len() == new_num_signers)
+        });
+        Ok(accepted_signers.count() + ignoring_signers.len() == new_num_signers)
     })
     .expect("FAIL: Timed out waiting for block proposal acceptance");
     let new_signature_hash = new_signature_hash.expect("Failed to get new signature hash");