From a02c9a60f5264ea82f571af150a594a6cdf23161 Mon Sep 17 00:00:00 2001
From: Jeff Bencin <jeff.bencin@gmail.com>
Date: Fri, 3 Jan 2025 17:36:04 -0500
Subject: [PATCH 1/2] chore: Apply Clippy lint `collection_is_never_read`

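Clippy's `collection_is_never_read` lint flags collections that are only ever
written to and never read back. Most hits in this patch are test-only
accumulators, which are simply deleted; a few bindings are kept for their side
effects or to hold values alive, and get an explicit
`#[allow(clippy::collection_is_never_read)]` instead. A minimal illustration of
both cases, using made-up names rather than anything from this codebase:

    use std::sync::Mutex;

    fn main() {
        // Flagged by the lint: `doubled` is filled but never read again, so
        // the fix is simply to delete it (most removals below are this case).
        let mut doubled = Vec::new();
        for i in 0..3 {
            doubled.push(i * 2);
        }

        // Kept only so the guards stay alive for the rest of the scope; Clippy
        // cannot see that intent, so the lint is silenced explicitly.
        let mutexes = vec![Mutex::new(0u32), Mutex::new(1u32)];
        #[allow(clippy::collection_is_never_read)]
        let mut guards = Vec::new();
        for m in &mutexes {
            guards.push(m.lock().unwrap());
        }
    }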
---
 stackslib/src/burnchains/tests/burnchain.rs   | 13 ------
 stackslib/src/burnchains/tests/db.rs          |  4 --
 stackslib/src/chainstate/coordinator/tests.rs | 33 --------------
 .../chainstate/nakamoto/coordinator/tests.rs  | 13 +-----
 .../src/chainstate/nakamoto/tests/mod.rs      |  2 -
 stackslib/src/chainstate/stacks/block.rs      | 13 ------
 .../src/chainstate/stacks/boot/pox_4_tests.rs |  4 ++
 .../src/chainstate/stacks/db/transactions.rs  |  3 +-
 .../src/chainstate/stacks/index/test/trie.rs  |  5 ---
 .../stacks/tests/block_construction.rs        | 45 -------------------
 .../stacks/tests/chain_histories.rs           | 14 ------
 stackslib/src/chainstate/stacks/tests/mod.rs  |  4 --
 .../src/chainstate/stacks/transaction.rs      |  3 --
 stackslib/src/core/tests/mod.rs               |  4 +-
 stackslib/src/net/atlas/tests.rs              |  5 ---
 stackslib/src/net/tests/download/epoch2x.rs   |  5 +--
 stackslib/src/net/tests/mempool/mod.rs        |  5 +--
 stackslib/src/net/tests/mod.rs                |  6 +--
 stackslib/src/net/tests/relay/nakamoto.rs     |  8 ----
 stackslib/src/util_lib/strings.rs             |  2 +-
 testnet/stacks-node/src/event_dispatcher.rs   |  4 ++
 testnet/stacks-node/src/tests/epoch_21.rs     |  6 ---
 .../src/tests/nakamoto_integrations.rs        |  7 ++-
 .../src/tests/neon_integrations.rs            | 10 -----
 24 files changed, 21 insertions(+), 197 deletions(-)

diff --git a/stackslib/src/burnchains/tests/burnchain.rs b/stackslib/src/burnchains/tests/burnchain.rs
index 7f6be5bcf8..278d9b2910 100644
--- a/stackslib/src/burnchains/tests/burnchain.rs
+++ b/stackslib/src/burnchains/tests/burnchain.rs
@@ -698,19 +698,14 @@ fn test_burn_snapshot_sequence() {
         initial_reward_start_block: first_block_height,
     };
 
-    let mut leader_private_keys = vec![];
     let mut leader_public_keys = vec![];
     let mut leader_bitcoin_public_keys = vec![];
-    let mut leader_bitcoin_addresses = vec![];
 
     for i in 0..32 {
         let mut csprng: ThreadRng = thread_rng();
         let vrf_privkey = VRFPrivateKey(ed25519_dalek::SigningKey::generate(&mut csprng));
         let vrf_pubkey = VRFPublicKey::from_private(&vrf_privkey);
 
-        let privkey_hex = vrf_privkey.to_hex();
-        leader_private_keys.push(privkey_hex);
-
         let pubkey_hex = vrf_pubkey.to_hex();
         leader_public_keys.push(pubkey_hex);
 
@@ -718,12 +713,6 @@ fn test_burn_snapshot_sequence() {
         let bitcoin_publickey = BitcoinPublicKey::from_private(&bitcoin_privkey);
 
         leader_bitcoin_public_keys.push(to_hex(&bitcoin_publickey.to_bytes()));
-
-        leader_bitcoin_addresses.push(BitcoinAddress::from_bytes_legacy(
-            BitcoinNetworkType::Testnet,
-            LegacyBitcoinAddressType::PublicKeyHash,
-            &Hash160::from_data(&bitcoin_publickey.to_bytes()).0,
-        ));
     }
 
     let mut expected_burn_total: u64 = 0;
@@ -732,7 +721,6 @@ fn test_burn_snapshot_sequence() {
     let mut db = SortitionDB::connect_test(first_block_height, &first_burn_hash).unwrap();
     let mut prev_snapshot =
         BlockSnapshot::initial(first_block_height, &first_burn_hash, first_block_height);
-    let mut all_stacks_block_hashes = vec![];
 
     for i in 0..32 {
         let mut block_ops = vec![];
@@ -823,7 +811,6 @@ fn test_burn_snapshot_sequence() {
                 burn_header_hash: burn_block_hash.clone(),
             };
 
-            all_stacks_block_hashes.push(next_block_commit.block_header_hash.clone());
             block_ops.push(BlockstackOperationType::LeaderBlockCommit(
                 next_block_commit,
             ));
diff --git a/stackslib/src/burnchains/tests/db.rs b/stackslib/src/burnchains/tests/db.rs
index c8f568b5bf..834a062088 100644
--- a/stackslib/src/burnchains/tests/db.rs
+++ b/stackslib/src/burnchains/tests/db.rs
@@ -919,8 +919,6 @@ fn test_update_block_descendancy_with_fork() {
     let mut cmts_genesis = vec![];
     let mut cmts_invalid = vec![];
 
-    let mut fork_parent = None;
-    let mut fork_parent_block_header: Option<BurnchainBlockHeader> = None;
     let mut fork_cmts = vec![];
 
     for i in 0..5 {
@@ -954,7 +952,6 @@ fn test_update_block_descendancy_with_fork() {
         };
 
         fork_headers.push(block_header.clone());
-        fork_parent_block_header = Some(block_header);
     }
 
     let mut am_id = 0;
@@ -1018,7 +1015,6 @@ fn test_update_block_descendancy_with_fork() {
         fork_cmts.push(fork_cmt.clone());
 
         parent = Some(cmt);
-        fork_parent = Some(fork_cmt);
 
         if i == 0 {
             am_id = {
diff --git a/stackslib/src/chainstate/coordinator/tests.rs b/stackslib/src/chainstate/coordinator/tests.rs
index 0863708122..a56d0c6f67 100644
--- a/stackslib/src/chainstate/coordinator/tests.rs
+++ b/stackslib/src/chainstate/coordinator/tests.rs
@@ -2268,7 +2268,6 @@ fn test_sortition_with_reward_set() {
     let mut started_first_reward_cycle = false;
     // process sequential blocks, and their sortitions...
     let mut stacks_blocks: Vec<(SortitionId, StacksBlock)> = vec![];
-    let mut anchor_blocks = vec![];
 
     // split up the vrf keys and committers so that we have some that will be mining "correctly"
     //   and some that will be producing bad outputs
@@ -2442,10 +2441,6 @@ fn test_sortition_with_reward_set() {
         let new_burnchain_tip = burnchain.get_canonical_chain_tip().unwrap();
         if b.is_reward_cycle_start(new_burnchain_tip.block_height) {
             started_first_reward_cycle = true;
-            // store the anchor block for this sortition for later checking
-            let ic = sort_db.index_handle_at_tip();
-            let bhh = ic.get_last_anchor_block_hash().unwrap().unwrap();
-            anchor_blocks.push(bhh);
         }
 
         let tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap();
@@ -2540,7 +2535,6 @@ fn test_sortition_with_burner_reward_set() {
     let mut started_first_reward_cycle = false;
     // process sequential blocks, and their sortitions...
     let mut stacks_blocks: Vec<(SortitionId, StacksBlock)> = vec![];
-    let mut anchor_blocks = vec![];
 
     // split up the vrf keys and committers so that we have some that will be mining "correctly"
     //   and some that will be producing bad outputs
@@ -2688,10 +2682,6 @@ fn test_sortition_with_burner_reward_set() {
         let new_burnchain_tip = burnchain.get_canonical_chain_tip().unwrap();
         if b.is_reward_cycle_start(new_burnchain_tip.block_height) {
             started_first_reward_cycle = true;
-            // store the anchor block for this sortition for later checking
-            let ic = sort_db.index_handle_at_tip();
-            let bhh = ic.get_last_anchor_block_hash().unwrap().unwrap();
-            anchor_blocks.push(bhh);
         }
 
         let tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap();
@@ -2804,7 +2794,6 @@ fn test_pox_btc_ops() {
     let mut started_first_reward_cycle = false;
     // process sequential blocks, and their sortitions...
     let mut stacks_blocks: Vec<(SortitionId, StacksBlock)> = vec![];
-    let mut anchor_blocks = vec![];
 
     // track the reward set consumption
     let mut reward_cycle_count = 0;
@@ -2972,10 +2961,6 @@ fn test_pox_btc_ops() {
         if b.is_reward_cycle_start(new_burnchain_tip.block_height) {
             if new_burnchain_tip.block_height < sunset_ht {
                 started_first_reward_cycle = true;
-                // store the anchor block for this sortition for later checking
-                let ic = sort_db.index_handle_at_tip();
-                let bhh = ic.get_last_anchor_block_hash().unwrap().unwrap();
-                anchor_blocks.push(bhh);
             } else {
                 // store the anchor block for this sortition for later checking
                 let ic = sort_db.index_handle_at_tip();
@@ -3096,7 +3081,6 @@ fn test_stx_transfer_btc_ops() {
     let mut started_first_reward_cycle = false;
     // process sequential blocks, and their sortitions...
     let mut stacks_blocks: Vec<(SortitionId, StacksBlock)> = vec![];
-    let mut anchor_blocks = vec![];
 
     // track the reward set consumption
     let mut reward_recipients = HashSet::new();
@@ -3319,10 +3303,6 @@ fn test_stx_transfer_btc_ops() {
         if b.is_reward_cycle_start(new_burnchain_tip.block_height) {
             if new_burnchain_tip.block_height < sunset_ht {
                 started_first_reward_cycle = true;
-                // store the anchor block for this sortition for later checking
-                let ic = sort_db.index_handle_at_tip();
-                let bhh = ic.get_last_anchor_block_hash().unwrap().unwrap();
-                anchor_blocks.push(bhh);
             } else {
                 // store the anchor block for this sortition for later checking
                 let ic = sort_db.index_handle_at_tip();
@@ -5303,7 +5283,6 @@ fn test_sortition_with_sunset() {
     let mut started_first_reward_cycle = false;
     // process sequential blocks, and their sortitions...
     let mut stacks_blocks: Vec<(SortitionId, StacksBlock)> = vec![];
-    let mut anchor_blocks = vec![];
 
     // split up the vrf keys and committers so that we have some that will be mining "correctly"
     //   and some that will be producing bad outputs
@@ -5487,10 +5466,6 @@ fn test_sortition_with_sunset() {
         if b.is_reward_cycle_start(new_burnchain_tip.block_height) {
             if new_burnchain_tip.block_height < sunset_ht {
                 started_first_reward_cycle = true;
-                // store the anchor block for this sortition for later checking
-                let ic = sort_db.index_handle_at_tip();
-                let bhh = ic.get_last_anchor_block_hash().unwrap().unwrap();
-                anchor_blocks.push(bhh);
             } else {
                 // store the anchor block for this sortition for later checking
                 let ic = sort_db.index_handle_at_tip();
@@ -5616,7 +5591,6 @@ fn test_sortition_with_sunset_and_epoch_switch() {
     let mut started_first_reward_cycle = false;
     // process sequential blocks, and their sortitions...
     let mut stacks_blocks: Vec<(SortitionId, StacksBlock)> = vec![];
-    let mut anchor_blocks = vec![];
 
     // split up the vrf keys and committers so that we have some that will be mining "correctly"
     //   and some that will be producing bad outputs
@@ -5828,10 +5802,6 @@ fn test_sortition_with_sunset_and_epoch_switch() {
         if b.is_reward_cycle_start(new_burnchain_tip.block_height) {
             if new_burnchain_tip.block_height < sunset_ht {
                 started_first_reward_cycle = true;
-                // store the anchor block for this sortition for later checking
-                let ic = sort_db.index_handle_at_tip();
-                let bhh = ic.get_last_anchor_block_hash().unwrap().unwrap();
-                anchor_blocks.push(bhh);
             } else {
                 // store the anchor block for this sortition for later checking
                 let ic = sort_db.index_handle_at_tip();
@@ -6479,7 +6449,6 @@ fn test_pox_fork_out_of_order() {
     let mut sortition_ids_diverged = false;
     // process sequential blocks, and their sortitions...
     let mut stacks_blocks: Vec<(SortitionId, StacksBlock)> = vec![];
-    let mut anchor_blocks = vec![];
 
     // setup:
     //  2 forks: 0 - 1 - 2 - 3 - 4 - 5 - 11 - 12 - 13 - 14 - 15
@@ -6560,8 +6529,6 @@ fn test_pox_fork_out_of_order() {
                 .unwrap()
                 .block_height
             );
-
-            anchor_blocks.push(bhh);
         }
 
         let tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap();
diff --git a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs
index e0b3375452..b8c93d427f 100644
--- a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs
+++ b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs
@@ -112,6 +112,8 @@ fn advance_to_nakamoto(
     let default_pox_addr =
         PoxAddress::from_legacy(AddressHashMode::SerializeP2PKH, addr.bytes.clone());
 
+    // Stores the result of a function with side effects, so have Clippy ignore it
+    #[allow(clippy::collection_is_never_read)]
     let mut tip = None;
     for sortition_height in 0..11 {
         // stack to pox-3 in cycle 7
@@ -347,9 +349,6 @@ fn replay_reward_cycle(
         .step_by(reward_cycle_length)
         .collect();
 
-    let mut indexes: Vec<_> = (0..stacks_blocks.len()).collect();
-    indexes.shuffle(&mut thread_rng());
-
     for burn_ops in burn_ops.iter() {
         let (_, _, consensus_hash) = peer.next_burnchain_block(burn_ops.clone());
     }
@@ -845,7 +844,6 @@ fn block_descendant() {
     boot_plan.pox_constants = pox_constants;
 
     let mut peer = boot_plan.boot_into_nakamoto_peer(vec![], None);
-    let mut blocks = vec![];
     let pox_constants = peer.sortdb().pox_constants.clone();
     let first_burn_height = peer.sortdb().first_block_height;
 
@@ -854,7 +852,6 @@ fn block_descendant() {
     loop {
         let (block, burn_height, ..) =
             peer.single_block_tenure(&private_key, |_| {}, |_| {}, |_| true);
-        blocks.push(block);
 
         if pox_constants.is_in_prepare_phase(first_burn_height, burn_height + 1) {
             info!("At prepare phase start"; "burn_height" => burn_height);
@@ -3206,9 +3203,6 @@ fn test_stacks_on_burnchain_ops() {
     );
 
     let mut all_blocks: Vec<NakamotoBlock> = vec![];
-    let mut all_burn_ops = vec![];
-    let mut consensus_hashes = vec![];
-    let mut fee_counts = vec![];
     let stx_miner_key = peer.miner.nakamoto_miner_key();
 
     let mut extra_burn_ops = vec![];
@@ -3406,8 +3400,6 @@ fn test_stacks_on_burnchain_ops() {
             })
             .sum::<u128>();
 
-        consensus_hashes.push(consensus_hash);
-        fee_counts.push(fees);
         let mut blocks: Vec<NakamotoBlock> = blocks_and_sizes
             .into_iter()
             .map(|(block, _, _)| block)
@@ -3449,7 +3441,6 @@ fn test_stacks_on_burnchain_ops() {
         );
 
         all_blocks.append(&mut blocks);
-        all_burn_ops.push(burn_ops);
     }
 
     // check receipts for burn ops
diff --git a/stackslib/src/chainstate/nakamoto/tests/mod.rs b/stackslib/src/chainstate/nakamoto/tests/mod.rs
index bd415b68b0..4d64a1e4f1 100644
--- a/stackslib/src/chainstate/nakamoto/tests/mod.rs
+++ b/stackslib/src/chainstate/nakamoto/tests/mod.rs
@@ -2529,8 +2529,6 @@ fn parse_vote_for_aggregate_public_key_invalid() {
     };
     invalid_function_arg_reward_cycle.set_origin_nonce(1);
 
-    let mut account_nonces = std::collections::HashMap::new();
-    account_nonces.insert(invalid_contract_name.origin_address(), 1);
     for (i, tx) in vec![
         invalid_contract_address,
         invalid_contract_name,
diff --git a/stackslib/src/chainstate/stacks/block.rs b/stackslib/src/chainstate/stacks/block.rs
index 51c53c94de..fa08e0f06d 100644
--- a/stackslib/src/chainstate/stacks/block.rs
+++ b/stackslib/src/chainstate/stacks/block.rs
@@ -1146,19 +1146,6 @@ mod test {
             StacksEpochId::latest(),
         );
 
-        // remove all coinbases
-        let mut txs_anchored = vec![];
-
-        for tx in all_txs.iter() {
-            match tx.payload {
-                TransactionPayload::Coinbase(..) => {
-                    continue;
-                }
-                _ => {}
-            }
-            txs_anchored.push(tx);
-        }
-
         // make microblocks with 3 transactions each (or fewer)
         for i in 0..(all_txs.len() / 3) {
             let txs = vec![
diff --git a/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs b/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs
index 072f1d33ef..f6c9b7d012 100644
--- a/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs
+++ b/stackslib/src/chainstate/stacks/boot/pox_4_tests.rs
@@ -928,6 +928,8 @@ fn pox_lock_unlock() {
 
     assert_eq!(burnchain.pox_constants.reward_slots(), 6);
     let mut coinbase_nonce = 0;
+    // Stores the result of a function with side effects, so have Clippy ignore it
+    #[allow(clippy::collection_is_never_read)]
     let mut latest_block = None;
 
     // Advance into pox4
@@ -2685,6 +2687,8 @@ fn pox_4_delegate_stack_increase_events() {
 
     assert_eq!(burnchain.pox_constants.reward_slots(), 6);
     let mut coinbase_nonce = 0;
+    // Stores the result of a function with side effects, so have Clippy ignore it
+    #[allow(clippy::collection_is_never_read)]
     let mut latest_block = None;
 
     let alice_key = keys.pop().unwrap();
diff --git a/stackslib/src/chainstate/stacks/db/transactions.rs b/stackslib/src/chainstate/stacks/db/transactions.rs
index e56624b84f..87e29e3f10 100644
--- a/stackslib/src/chainstate/stacks/db/transactions.rs
+++ b/stackslib/src/chainstate/stacks/db/transactions.rs
@@ -72,8 +72,9 @@ impl TryFrom<Value> for HashableClarityValue {
 
 impl std::hash::Hash for HashableClarityValue {
     fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
-        #[allow(clippy::unwrap_used)]
+        #[allow(clippy::unwrap_used, clippy::collection_is_never_read)]
         // this unwrap is safe _as long as_ TryFrom<Value> was used as a constructor
+        // Also, this function has side effects, which cause Clippy to wrongly think `bytes` is never read
         let bytes = self.0.serialize_to_vec().unwrap();
         bytes.hash(state);
     }
diff --git a/stackslib/src/chainstate/stacks/index/test/trie.rs b/stackslib/src/chainstate/stacks/index/test/trie.rs
index 8625527a16..9a130bf9d7 100644
--- a/stackslib/src/chainstate/stacks/index/test/trie.rs
+++ b/stackslib/src/chainstate/stacks/index/test/trie.rs
@@ -1245,8 +1245,6 @@ fn trie_cursor_splice_leaf_4() {
             let (nodes, node_ptrs, hashes) =
                 make_node_path(&mut f, node_id.to_u8(), &path_segments, [31u8; 40].to_vec());
 
-            let mut ptrs = vec![];
-
             // splice in a node in each path segment
             for k in 0..5 {
                 let mut path = vec![
@@ -1274,7 +1272,6 @@ fn trie_cursor_splice_leaf_4() {
                     &mut node,
                 )
                 .unwrap();
-                ptrs.push(new_ptr);
 
                 Trie::update_root_hash(&mut f, &c).unwrap();
 
@@ -1338,7 +1335,6 @@ fn trie_cursor_splice_leaf_2() {
 
             let (nodes, node_ptrs, hashes) =
                 make_node_path(&mut f, node_id.to_u8(), &path_segments, [31u8; 40].to_vec());
-            let mut ptrs = vec![];
 
             // splice in a node in each path segment
             for k in 0..10 {
@@ -1363,7 +1359,6 @@ fn trie_cursor_splice_leaf_2() {
                     &mut node,
                 )
                 .unwrap();
-                ptrs.push(new_ptr);
 
                 Trie::update_root_hash(&mut f, &c).unwrap();
 
diff --git a/stackslib/src/chainstate/stacks/tests/block_construction.rs b/stackslib/src/chainstate/stacks/tests/block_construction.rs
index bcf7611695..4b28f637a5 100644
--- a/stackslib/src/chainstate/stacks/tests/block_construction.rs
+++ b/stackslib/src/chainstate/stacks/tests/block_construction.rs
@@ -187,7 +187,6 @@ fn test_build_anchored_blocks_stx_transfers_single() {
     let recipient = StacksAddress::from_string(recipient_addr_str).unwrap();
     let mut sender_nonce = 0;
 
-    let mut last_block = None;
     for tenure_id in 0..num_blocks {
         // send transactions to the mempool
         let tip = SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn())
@@ -271,8 +270,6 @@ fn test_build_anchored_blocks_stx_transfers_single() {
             },
         );
 
-        last_block = Some(stacks_block.clone());
-
         peer.next_burnchain_block(burn_ops.clone());
         peer.process_stacks_epoch_at_tip(&stacks_block, &microblocks);
 
@@ -324,7 +321,6 @@ fn test_build_anchored_blocks_empty_with_builder_timeout() {
     let recipient = StacksAddress::from_string(recipient_addr_str).unwrap();
     let mut sender_nonce = 0;
 
-    let mut last_block = None;
     for tenure_id in 0..num_blocks {
         // send transactions to the mempool
         let tip = SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn())
@@ -412,8 +408,6 @@ fn test_build_anchored_blocks_empty_with_builder_timeout() {
             },
         );
 
-        last_block = Some(stacks_block.clone());
-
         peer.next_burnchain_block(burn_ops.clone());
         peer.process_stacks_epoch_at_tip(&stacks_block, &microblocks);
 
@@ -462,7 +456,6 @@ fn test_build_anchored_blocks_stx_transfers_multi() {
     let recipient = StacksAddress::from_string(recipient_addr_str).unwrap();
     let mut sender_nonce = 0;
 
-    let mut last_block = None;
     for tenure_id in 0..num_blocks {
         // send transactions to the mempool
         let tip = SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn())
@@ -574,8 +567,6 @@ fn test_build_anchored_blocks_stx_transfers_multi() {
             },
         );
 
-        last_block = Some(stacks_block.clone());
-
         peer.next_burnchain_block(burn_ops.clone());
         peer.process_stacks_epoch_at_tip(&stacks_block, &microblocks);
 
@@ -1375,7 +1366,6 @@ fn test_build_anchored_blocks_skip_too_expensive() {
     let recipient = StacksAddress::from_string(recipient_addr_str).unwrap();
     let mut sender_nonce = 0;
 
-    let mut last_block = None;
     for tenure_id in 0..num_blocks {
         // send transactions to the mempool
         let tip = SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn())
@@ -1515,8 +1505,6 @@ fn test_build_anchored_blocks_skip_too_expensive() {
             },
         );
 
-        last_block = Some(stacks_block.clone());
-
         peer.next_burnchain_block(burn_ops.clone());
         peer.process_stacks_epoch_at_tip(&stacks_block, &microblocks);
 
@@ -1794,7 +1782,6 @@ fn test_build_anchored_blocks_multiple_chaintips() {
         sn.block_height
     };
 
-    let mut last_block = None;
     for tenure_id in 0..num_blocks {
         // send transactions to the mempool
         let tip = SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn())
@@ -1889,8 +1876,6 @@ fn test_build_anchored_blocks_multiple_chaintips() {
             },
         );
 
-        last_block = Some(stacks_block.clone());
-
         peer.next_burnchain_block(burn_ops.clone());
         peer.process_stacks_epoch_at_tip(&stacks_block, &microblocks);
 
@@ -1936,7 +1921,6 @@ fn test_build_anchored_blocks_empty_chaintips() {
         sn.block_height
     };
 
-    let mut last_block = None;
     for tenure_id in 0..num_blocks {
         // send transactions to the mempool
         let tip = SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn())
@@ -2025,8 +2009,6 @@ fn test_build_anchored_blocks_empty_chaintips() {
             },
         );
 
-        last_block = Some(stacks_block.clone());
-
         peer.next_burnchain_block(burn_ops.clone());
         peer.process_stacks_epoch_at_tip(&stacks_block, &microblocks);
 
@@ -2079,7 +2061,6 @@ fn test_build_anchored_blocks_too_expensive_transactions() {
         sn.block_height
     };
 
-    let mut last_block = None;
     for tenure_id in 0..num_blocks {
         // send transactions to the mempool
         let tip = SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn())
@@ -2203,8 +2184,6 @@ fn test_build_anchored_blocks_too_expensive_transactions() {
             },
         );
 
-        last_block = Some(stacks_block.clone());
-
         peer.next_burnchain_block(burn_ops.clone());
         peer.process_stacks_epoch_at_tip(&stacks_block, &microblocks);
 
@@ -2235,7 +2214,6 @@ fn test_build_anchored_blocks_invalid() {
 
     let mut last_block: Option<StacksBlock> = None;
     let mut last_valid_block: Option<StacksBlock> = None;
-    let mut last_tip: Option<BlockSnapshot> = None;
     let mut last_parent: Option<StacksBlock> = None;
     let mut last_parent_tip: Option<StacksHeaderInfo> = None;
 
@@ -2267,8 +2245,6 @@ fn test_build_anchored_blocks_invalid() {
             tip = resume_tip.clone().unwrap();
         }
 
-        last_tip = Some(tip.clone());
-
         let (mut burn_ops, stacks_block, microblocks) = peer.make_tenure(|ref mut miner, ref mut sortdb, ref mut chainstate, vrf_proof, ref parent_opt, ref parent_microblock_header_opt| {
             let parent_opt =
                 if tenure_id != bad_block_tenure {
@@ -2444,7 +2420,6 @@ fn test_build_anchored_blocks_bad_nonces() {
         sn.block_height
     };
 
-    let mut last_block = None;
     for tenure_id in 0..num_blocks {
         eprintln!("Start tenure {:?}", tenure_id);
         // send transactions to the mempool
@@ -2640,8 +2615,6 @@ fn test_build_anchored_blocks_bad_nonces() {
             },
         );
 
-        last_block = Some(stacks_block.clone());
-
         peer.next_burnchain_block(burn_ops.clone());
         peer.process_stacks_epoch_at_tip(&stacks_block, &microblocks);
 
@@ -2699,7 +2672,6 @@ fn test_build_microblock_stream_forks() {
     let recipient_addr_str = "ST1RFD5Q2QPK3E0F08HG9XDX7SSC7CNRS0QR0SGEV";
     let recipient = StacksAddress::from_string(recipient_addr_str).unwrap();
 
-    let mut last_block = None;
     for tenure_id in 0..num_blocks {
         // send transactions to the mempool
         let tip = SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn())
@@ -2910,8 +2882,6 @@ fn test_build_microblock_stream_forks() {
             },
         );
 
-        last_block = Some(stacks_block.clone());
-
         peer.next_burnchain_block(burn_ops.clone());
         peer.process_stacks_epoch_at_tip(&stacks_block, &microblocks);
     }
@@ -3494,12 +3464,6 @@ fn test_contract_call_across_clarity_versions() {
     let num_blocks = 10;
     let mut anchored_sender_nonce = 0;
 
-    let mut mblock_privks = vec![];
-    for _ in 0..num_blocks {
-        let mblock_privk = StacksPrivateKey::new();
-        mblock_privks.push(mblock_privk);
-    }
-
     let mut peer = TestPeer::new(peer_config);
 
     let chainstate_path = peer.chainstate_path.clone();
@@ -4566,7 +4530,6 @@ fn mempool_incorporate_pox_unlocks() {
     let recipient_addr_str = "ST1RFD5Q2QPK3E0F08HG9XDX7SSC7CNRS0QR0SGEV";
     let recipient = StacksAddress::from_string(recipient_addr_str).unwrap();
 
-    let mut last_block = None;
     for tenure_id in 0..num_blocks {
         // send transactions to the mempool
         let tip = SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn())
@@ -4719,11 +4682,6 @@ fn mempool_incorporate_pox_unlocks() {
 
         let (_, _, consensus_hash) = peer.next_burnchain_block(burn_ops.clone());
         peer.process_stacks_epoch_at_tip(&stacks_block, &microblocks);
-
-        last_block = Some(StacksBlockHeader::make_index_block_hash(
-            &consensus_hash,
-            &stacks_block.block_hash(),
-        ));
     }
 }
 
@@ -4763,7 +4721,6 @@ fn test_fee_order_mismatch_nonce_order() {
     let recipient = StacksAddress::from_string(recipient_addr_str).unwrap();
     let sender_nonce = 0;
 
-    let mut last_block = None;
     // send transactions to the mempool
     let tip =
         SortitionDB::get_canonical_burn_chain_tip(&peer.sortdb.as_ref().unwrap().conn()).unwrap();
@@ -4852,8 +4809,6 @@ fn test_fee_order_mismatch_nonce_order() {
         },
     );
 
-    last_block = Some(stacks_block.clone());
-
     peer.next_burnchain_block(burn_ops.clone());
     peer.process_stacks_epoch_at_tip(&stacks_block, &microblocks);
 
diff --git a/stackslib/src/chainstate/stacks/tests/chain_histories.rs b/stackslib/src/chainstate/stacks/tests/chain_histories.rs
index b8441a3cbb..763942c684 100644
--- a/stackslib/src/chainstate/stacks/tests/chain_histories.rs
+++ b/stackslib/src/chainstate/stacks/tests/chain_histories.rs
@@ -281,8 +281,6 @@ where
         ],
     );
 
-    let mut sortition_winners = vec![];
-
     let first_snapshot = SortitionDB::get_first_block_snapshot(burn_node.sortdb.conn()).unwrap();
     let mut fork = TestBurnchainFork::new(
         first_snapshot.block_height,
@@ -415,8 +413,6 @@ where
             chain_tip.anchored_header.as_stacks_epoch2().unwrap(),
         ));
 
-        sortition_winners.push(miner_1.origin_address().unwrap());
-
         let mut next_miner_trace = TestMinerTracePoint::new();
         next_miner_trace.add(
             miner_1.id,
@@ -631,7 +627,6 @@ where
                 &fork_snapshot.consensus_hash,
                 &stacks_block_1.header
             ));
-            sortition_winners.push(miner_1.origin_address().unwrap());
 
             next_miner_trace.add(
                 miner_1.id,
@@ -653,7 +648,6 @@ where
                 &fork_snapshot.consensus_hash,
                 &stacks_block_2.header
             ));
-            sortition_winners.push(miner_2.origin_address().unwrap());
 
             next_miner_trace.add(
                 miner_2.id,
@@ -735,8 +729,6 @@ where
         ],
     );
 
-    let mut sortition_winners = vec![];
-
     let first_snapshot = SortitionDB::get_first_block_snapshot(burn_node.sortdb.conn()).unwrap();
     let mut fork = TestBurnchainFork::new(
         first_snapshot.block_height,
@@ -960,7 +952,6 @@ where
                 &fork_snapshot.consensus_hash,
                 &stacks_block_1.header
             ));
-            sortition_winners.push(miner_1.origin_address().unwrap());
         } else {
             test_debug!(
                 "\n\nMiner 2 ({}) won sortition\n",
@@ -973,7 +964,6 @@ where
                 &fork_snapshot.consensus_hash,
                 &stacks_block_2.header
             ));
-            sortition_winners.push(miner_2.origin_address().unwrap());
         }
 
         // add both blocks to the miner trace, because in this test runner, there will be _two_
@@ -999,8 +989,6 @@ where
 
     test_debug!("\n\nMiner 1 and Miner 2 now separate\n\n");
 
-    let mut sortition_winners_1 = sortition_winners.clone();
-    let mut sortition_winners_2 = sortition_winners.clone();
     let snapshot_at_fork = {
         let ic = burn_node.sortdb.index_conn();
         let tip = fork.get_tip(&ic);
@@ -1244,7 +1232,6 @@ where
                 &fork_snapshot.consensus_hash,
                 &stacks_block_1.header
             ));
-            sortition_winners_1.push(miner_1.origin_address().unwrap());
         } else {
             test_debug!(
                 "\n\nMiner 2 ({}) won sortition\n",
@@ -1257,7 +1244,6 @@ where
                 &fork_snapshot.consensus_hash,
                 &stacks_block_2.header
             ));
-            sortition_winners_2.push(miner_2.origin_address().unwrap());
         }
 
         // each miner produced a block; just one of them got accepted
diff --git a/stackslib/src/chainstate/stacks/tests/mod.rs b/stackslib/src/chainstate/stacks/tests/mod.rs
index 9f5dd9c860..358ab3bf71 100644
--- a/stackslib/src/chainstate/stacks/tests/mod.rs
+++ b/stackslib/src/chainstate/stacks/tests/mod.rs
@@ -839,7 +839,6 @@ pub fn check_mining_reward(
     block_height: u64,
     prev_block_rewards: &Vec<Vec<MinerPaymentSchedule>>,
 ) -> bool {
-    let mut block_rewards = HashMap::new();
     let mut stream_rewards = HashMap::new();
     let mut heights = HashMap::new();
     let mut confirmed = HashSet::new();
@@ -849,9 +848,6 @@ pub fn check_mining_reward(
                 &reward.consensus_hash,
                 &reward.block_hash,
             );
-            if reward.coinbase > 0 {
-                block_rewards.insert(ibh.clone(), reward.clone());
-            }
             if let MinerPaymentTxFees::Epoch2 { streamed, .. } = &reward.tx_fees {
                 if *streamed > 0 {
                     stream_rewards.insert(ibh.clone(), reward.clone());
diff --git a/stackslib/src/chainstate/stacks/transaction.rs b/stackslib/src/chainstate/stacks/transaction.rs
index d813dbcf01..765da5499d 100644
--- a/stackslib/src/chainstate/stacks/transaction.rs
+++ b/stackslib/src/chainstate/stacks/transaction.rs
@@ -3418,9 +3418,6 @@ mod test {
         let function_name = ClarityName::try_from("hello-function-name").unwrap();
         let function_args = vec![Value::Int(0)];
 
-        let mut contract_name_bytes = vec![contract_name.len() as u8];
-        contract_name_bytes.extend_from_slice(contract_name.as_bytes());
-
         let mut contract_call_bytes = vec![];
         address
             .consensus_serialize(&mut contract_call_bytes)
diff --git a/stackslib/src/core/tests/mod.rs b/stackslib/src/core/tests/mod.rs
index 4477c93b93..ed62de2b42 100644
--- a/stackslib/src/core/tests/mod.rs
+++ b/stackslib/src/core/tests/mod.rs
@@ -2692,7 +2692,6 @@ fn test_filter_txs_by_type() {
         version: 1,
         bytes: Hash160([0xff; 20]),
     };
-    let mut txs = vec![];
     let block_height = 10;
     let mut total_len = 0;
 
@@ -2756,8 +2755,7 @@ fn test_filter_txs_by_type() {
         )
         .unwrap();
 
-        eprintln!("Added {} {}", i, &txid);
-        txs.push(tx);
+        eprintln!("Added {i} {txid}");
     }
     mempool_tx.commit().unwrap();
 
diff --git a/stackslib/src/net/atlas/tests.rs b/stackslib/src/net/atlas/tests.rs
index 8094c77799..d0ecd2fe22 100644
--- a/stackslib/src/net/atlas/tests.rs
+++ b/stackslib/src/net/atlas/tests.rs
@@ -685,20 +685,15 @@ fn test_downloader_context_attachment_requests() {
     let peer_url_3 = request_3.get_url().clone();
     let request_4 = inventories_requests.pop().unwrap();
     let peer_url_4 = request_4.get_url().clone();
-    let mut responses = HashMap::new();
 
     let response_1 =
         new_attachments_inventory_response(vec![(0, vec![1, 1, 1]), (1, vec![0, 0, 0])]);
-    responses.insert(peer_url_1.clone(), Some(response_1.clone()));
 
     let response_2 =
         new_attachments_inventory_response(vec![(0, vec![1, 1, 1]), (1, vec![0, 0, 0])]);
-    responses.insert(peer_url_2.clone(), Some(response_2.clone()));
 
     let response_3 =
         new_attachments_inventory_response(vec![(0, vec![0, 1, 1]), (1, vec![1, 0, 0])]);
-    responses.insert(peer_url_3.clone(), Some(response_3.clone()));
-    responses.insert(peer_url_4, None);
 
     inventories_results
         .succeeded
diff --git a/stackslib/src/net/tests/download/epoch2x.rs b/stackslib/src/net/tests/download/epoch2x.rs
index 9c995f1f32..fe193397ed 100644
--- a/stackslib/src/net/tests/download/epoch2x.rs
+++ b/stackslib/src/net/tests/download/epoch2x.rs
@@ -477,10 +477,9 @@ where
 
     info!("Completed walk round {} step(s)", round);
 
-    let mut peer_invs = vec![];
     for peer in peers.iter_mut() {
-        let peer_inv = get_blocks_inventory(peer, 0, num_burn_blocks);
-        peer_invs.push(peer_inv);
+        // TODO: Remove if this function has no side effects
+        let _ = get_blocks_inventory(peer, 0, num_burn_blocks);
 
         let availability = get_peer_availability(
             peer,
diff --git a/stackslib/src/net/tests/mempool/mod.rs b/stackslib/src/net/tests/mempool/mod.rs
index 436e5a315a..0b3ca27913 100644
--- a/stackslib/src/net/tests/mempool/mod.rs
+++ b/stackslib/src/net/tests/mempool/mod.rs
@@ -765,7 +765,6 @@ fn test_mempool_sync_2_peers_problematic() {
     let stacks_tip_bhh = peer_1.network.stacks_tip.block_hash.clone();
 
     // fill peer 1 with lots of transactions
-    let mut txs = HashMap::new();
     let mut peer_1_mempool = peer_1.mempool.take().unwrap();
     let mut mempool_tx = peer_1_mempool.tx_begin().unwrap();
     for i in 0..num_txs {
@@ -792,8 +791,6 @@ fn test_mempool_sync_2_peers_problematic() {
         let sponsor_nonce = tx.get_sponsor_nonce().unwrap_or(origin_nonce);
         let tx_fee = tx.get_tx_fee();
 
-        txs.insert(tx.txid(), tx.clone());
-
         // should succeed
         MemPoolDB::try_add_tx(
             &mut mempool_tx,
@@ -813,7 +810,7 @@ fn test_mempool_sync_2_peers_problematic() {
         )
         .unwrap();
 
-        eprintln!("Added {} {}", i, &txid);
+        eprintln!("Added {i} {txid}");
     }
     mempool_tx.commit().unwrap();
     peer_1.mempool = Some(peer_1_mempool);
diff --git a/stackslib/src/net/tests/mod.rs b/stackslib/src/net/tests/mod.rs
index 3a07ed006c..0329adc183 100644
--- a/stackslib/src/net/tests/mod.rs
+++ b/stackslib/src/net/tests/mod.rs
@@ -679,7 +679,6 @@ impl NakamotoBootPlan {
 
         let mut all_blocks = vec![];
         let mut malleablized_block_ids = HashSet::new();
-        let mut consensus_hashes = vec![];
         let mut last_tenure_change: Option<TenureChangePayload> = None;
         let mut blocks_since_last_tenure = 0;
 
@@ -761,7 +760,6 @@ impl NakamotoBootPlan {
                         });
 
                     peer.refresh_burnchain_view();
-                    consensus_hashes.push(next_consensus_hash);
 
                     let blocks: Vec<NakamotoBlock> = blocks_and_sizes
                         .into_iter()
@@ -862,7 +860,6 @@ impl NakamotoBootPlan {
                         });
                     peer.refresh_burnchain_view();
 
-                    consensus_hashes.push(consensus_hash);
                     let blocks: Vec<NakamotoBlock> = blocks_and_sizes
                         .into_iter()
                         .map(|(block, _, _)| block)
@@ -958,14 +955,13 @@ impl NakamotoBootPlan {
 
                     // each transaction was mined in the same order as described in the boot plan,
                     // and it succeeded.
-                    let mut burn_receipts = vec![];
                     let mut stacks_receipts = vec![];
                     for receipt in observed_block.receipts.iter() {
                         match &receipt.transaction {
                             TransactionOrigin::Stacks(..) => {
                                 stacks_receipts.push(receipt);
                             }
-                            TransactionOrigin::Burn(..) => burn_receipts.push(receipt),
+                            TransactionOrigin::Burn(..) => {}
                         }
                     }
 
diff --git a/stackslib/src/net/tests/relay/nakamoto.rs b/stackslib/src/net/tests/relay/nakamoto.rs
index 606f1f3fb2..e26f1a3142 100644
--- a/stackslib/src/net/tests/relay/nakamoto.rs
+++ b/stackslib/src/net/tests/relay/nakamoto.rs
@@ -212,9 +212,6 @@ impl SeedNode {
         let test_stackers = peer.config.test_stackers.take().unwrap();
 
         let mut all_blocks: Vec<NakamotoBlock> = vec![];
-        let mut all_burn_ops = vec![];
-        let mut rc_blocks = vec![];
-        let mut rc_burn_ops = vec![];
 
         // have the peer mine some blocks for two reward cycles
         for i in 0..(2 * rc_len) {
@@ -330,15 +327,10 @@ impl SeedNode {
                 .burnchain
                 .is_reward_cycle_start(tip.block_height)
             {
-                rc_blocks.push(all_blocks.clone());
-                rc_burn_ops.push(all_burn_ops.clone());
-
-                all_burn_ops.clear();
                 all_blocks.clear();
             }
 
             all_blocks.append(&mut blocks);
-            all_burn_ops.push(burn_ops);
         }
 
         peer.config.test_signers = Some(test_signers);
diff --git a/stackslib/src/util_lib/strings.rs b/stackslib/src/util_lib/strings.rs
index d1fb48c86b..87daf94ace 100644
--- a/stackslib/src/util_lib/strings.rs
+++ b/stackslib/src/util_lib/strings.rs
@@ -353,7 +353,7 @@ mod test {
 
         let mut contract_bytes = vec![s.len() as u8];
         contract_bytes.extend_from_slice(contract_str.as_bytes());
-        check_codec_and_corruption::<ContractName>(&contract_str, &clarity_bytes);
+        check_codec_and_corruption::<ContractName>(&contract_str, &contract_bytes);
     }
 
     #[test]
diff --git a/testnet/stacks-node/src/event_dispatcher.rs b/testnet/stacks-node/src/event_dispatcher.rs
index da1668cdd2..18ee8c1797 100644
--- a/testnet/stacks-node/src/event_dispatcher.rs
+++ b/testnet/stacks-node/src/event_dispatcher.rs
@@ -2266,6 +2266,8 @@ mod test {
         let server = Server::http(format!("127.0.0.1:{port}")).unwrap();
         thread::spawn(move || {
             let mut attempt = 0;
+            // This exists only to keep `request` from being dropped
+            #[allow(clippy::collection_is_never_read)]
             let mut _request_holder = None;
             while let Ok(request) = server.recv() {
                 attempt += 1;
@@ -2331,6 +2333,8 @@ mod test {
         let server = Server::http(format!("127.0.0.1:{port}")).unwrap();
         thread::spawn(move || {
             let mut attempt = 0;
+            // This exists only to keep `request` from being dropped
+            #[allow(clippy::collection_is_never_read)]
             let mut _request_holder = None;
             while let Ok(mut request) = server.recv() {
                 attempt += 1;
diff --git a/testnet/stacks-node/src/tests/epoch_21.rs b/testnet/stacks-node/src/tests/epoch_21.rs
index d50cac0117..eaa1e584d7 100644
--- a/testnet/stacks-node/src/tests/epoch_21.rs
+++ b/testnet/stacks-node/src/tests/epoch_21.rs
@@ -1045,7 +1045,6 @@ fn transition_adds_get_pox_addr_recipients() {
     );
 
     let mut spender_sks = vec![];
-    let mut spender_addrs = vec![];
     let mut initial_balances = vec![];
     let mut expected_pox_addrs = HashSet::new();
 
@@ -1056,7 +1055,6 @@ fn transition_adds_get_pox_addr_recipients() {
         let spender_addr: PrincipalData = to_addr(&spender_sk).into();
 
         spender_sks.push(spender_sk);
-        spender_addrs.push(spender_addr.clone());
         initial_balances.push(InitialBalance {
             address: spender_addr.clone(),
             amount: stacked + 100_000,
@@ -1353,8 +1351,6 @@ fn transition_adds_mining_from_segwit() {
         u32::MAX,
     );
 
-    let mut spender_sks = vec![];
-    let mut spender_addrs = vec![];
     let mut initial_balances = vec![];
 
     let stacked = 100_000_000_000 * (core::MICROSTACKS_PER_STACKS as u64);
@@ -1363,8 +1359,6 @@ fn transition_adds_mining_from_segwit() {
         let spender_sk = StacksPrivateKey::new();
         let spender_addr: PrincipalData = to_addr(&spender_sk).into();
 
-        spender_sks.push(spender_sk);
-        spender_addrs.push(spender_addr.clone());
         initial_balances.push(InitialBalance {
             address: spender_addr.clone(),
             amount: stacked + 100_000,
diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs
index 1b84b9c0cd..9e4b07b119 100644
--- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs
+++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs
@@ -2933,6 +2933,8 @@ fn block_proposal_api_endpoint() {
     let http_origin = format!("http://{}", &conf.node.rpc_bind);
     let path = format!("{http_origin}/v3/block_proposal");
 
+    // Clippy thinks this is unused, but it seems to be holding a lock
+    #[allow(clippy::collection_is_never_read)]
     let mut hold_proposal_mutex = Some(test_observer::PROPOSAL_RESPONSES.lock().unwrap());
     for (ix, (test_description, block_proposal, expected_http_code, _)) in
         test_cases.iter().enumerate()
@@ -10395,7 +10397,6 @@ fn clarity_cost_spend_down() {
             .get_stacks_blocks_processed();
         // Pause mining so we can add all our transactions to the mempool at once.
         TEST_MINE_STALL.lock().unwrap().replace(true);
-        let mut submitted_txs = vec![];
         for _nmb_tx in 0..nmb_txs_per_signer {
             for sender_sk in sender_sks.iter() {
                 let sender_nonce = get_and_increment_nonce(&sender_sk, &mut sender_nonces);
@@ -10411,9 +10412,7 @@ fn clarity_cost_spend_down() {
                     &[],
                 );
                 match submit_tx_fallible(&http_origin, &contract_tx) {
-                    Ok(txid) => {
-                        submitted_txs.push(txid);
-                    }
+                    Ok(_txid) => {}
                     Err(_e) => {
                         // If we fail to submit a tx, we need to make sure we don't
                         // increment the nonce for this sender, so we don't end up
diff --git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs
index a3ce78eb24..ed5fd9bbaa 100644
--- a/testnet/stacks-node/src/tests/neon_integrations.rs
+++ b/testnet/stacks-node/src/tests/neon_integrations.rs
@@ -8473,8 +8473,6 @@ fn atlas_stress_integration_test() {
         panic!();
     }
 
-    let mut all_zonefiles = vec![];
-
     // make a _ton_ of name-imports
     for i in 0..batches {
         let account_before = get_account(&http_origin, &to_addr(&user_1));
@@ -8486,8 +8484,6 @@ fn atlas_stress_integration_test() {
             let zonefile_hex = format!("facade00{:04x}{:04x}{:04x}", batch_size * i + j, i, j);
             let hashed_zonefile = Hash160::from_data(&hex_bytes(&zonefile_hex).unwrap());
 
-            all_zonefiles.push(zonefile_hex.clone());
-
             let tx_3 = make_contract_call(
                 &user_1,
                 2 + (batch_size * i + j) as u64,
@@ -8675,8 +8671,6 @@ fn atlas_stress_integration_test() {
             let zonefile_hex = format!("facade01{j:04x}");
             let hashed_zonefile = Hash160::from_data(&hex_bytes(&zonefile_hex).unwrap());
 
-            all_zonefiles.push(zonefile_hex.clone());
-
             let tx_6 = make_contract_call(
                 &users[batches * batch_size + j],
                 1,
@@ -8739,8 +8733,6 @@ fn atlas_stress_integration_test() {
             let zonefile_hex = format!("facade02{j:04x}");
             let hashed_zonefile = Hash160::from_data(&hex_bytes(&zonefile_hex).unwrap());
 
-            all_zonefiles.push(zonefile_hex.clone());
-
             let tx_7 = make_contract_call(
                 &users[batches * batch_size + j],
                 2,
@@ -8802,8 +8794,6 @@ fn atlas_stress_integration_test() {
             let zonefile_hex = format!("facade03{j:04x}");
             let hashed_zonefile = Hash160::from_data(&hex_bytes(&zonefile_hex).unwrap());
 
-            all_zonefiles.push(zonefile_hex.clone());
-
             let tx_8 = make_contract_call(
                 &users[batches * batch_size + j],
                 3,

From 9f1c4314ea02701c6b89b83795305519f1450cb4 Mon Sep 17 00:00:00 2001
From: Jeff Bencin <jeff.bencin@gmail.com>
Date: Thu, 23 Jan 2025 09:38:46 -0500
Subject: [PATCH 2/2] chore: Apply PR comments from Brice

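Where the collected value turned out never to be read at all, this revision
drops the binding and its `#[allow]` instead of silencing the lint, removes the
`get_blocks_inventory` call that had been kept only for possible side effects,
and relaxes that helper to take `&TestPeer`. A minimal sketch of the preferred
shape, with hypothetical names standing in for the real test helpers:

    // Hypothetical stand-in for peer.tenure_with_txs(); only its side effects
    // matter on this test path.
    fn tenure_with_txs(height: u32) -> u32 {
        println!("mined tenure at height {height}");
        height
    }

    fn advance_to_nakamoto_like() {
        // Rather than collecting the result into a never-read `tip` behind an
        // #[allow], call the function and discard the return value.
        for height in 0..11 {
            tenure_with_txs(height);
        }
    }

    fn main() {
        advance_to_nakamoto_like();
    }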
---
 stackslib/src/chainstate/nakamoto/coordinator/tests.rs | 5 +----
 stackslib/src/net/tests/download/epoch2x.rs            | 9 +++------
 testnet/stacks-node/src/tests/nakamoto_integrations.rs | 4 +---
 3 files changed, 5 insertions(+), 13 deletions(-)

diff --git a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs
index b8c93d427f..eb6fde3b12 100644
--- a/stackslib/src/chainstate/nakamoto/coordinator/tests.rs
+++ b/stackslib/src/chainstate/nakamoto/coordinator/tests.rs
@@ -112,9 +112,6 @@ fn advance_to_nakamoto(
     let default_pox_addr =
         PoxAddress::from_legacy(AddressHashMode::SerializeP2PKH, addr.bytes.clone());
 
-    // Stores the result of a function with side effects, so have Clippy ignore it
-    #[allow(clippy::collection_is_never_read)]
-    let mut tip = None;
     for sortition_height in 0..11 {
         // stack to pox-3 in cycle 7
         let txs = if sortition_height == 6 {
@@ -158,7 +155,7 @@ fn advance_to_nakamoto(
             vec![]
         };
 
-        tip = Some(peer.tenure_with_txs(&txs, &mut peer_nonce));
+        peer.tenure_with_txs(&txs, &mut peer_nonce);
     }
     // peer is at the start of cycle 8
 }
diff --git a/stackslib/src/net/tests/download/epoch2x.rs b/stackslib/src/net/tests/download/epoch2x.rs
index ed255c5271..850be9cb5e 100644
--- a/stackslib/src/net/tests/download/epoch2x.rs
+++ b/stackslib/src/net/tests/download/epoch2x.rs
@@ -216,10 +216,10 @@ fn test_get_block_availability() {
     })
 }
 
-fn get_blocks_inventory(peer: &mut TestPeer, start_height: u64, end_height: u64) -> BlocksInvData {
+fn get_blocks_inventory(peer: &TestPeer, start_height: u64, end_height: u64) -> BlocksInvData {
     let block_hashes = {
         let num_headers = end_height - start_height;
-        let ic = peer.sortdb.as_mut().unwrap().index_conn();
+        let ic = peer.sortdb.as_ref().unwrap().index_conn();
         let tip = SortitionDB::get_canonical_burn_chain_tip(&ic).unwrap();
         let ancestor = SortitionDB::get_ancestor_snapshot(&ic, end_height, &tip.sortition_id)
             .unwrap()
@@ -233,7 +233,7 @@ fn get_blocks_inventory(peer: &mut TestPeer, start_height: u64, end_height: u64)
     };
 
     let inv = peer
-        .chainstate()
+        .chainstate_ref()
         .get_blocks_inventory(&block_hashes)
         .unwrap();
     inv
@@ -476,9 +476,6 @@ where
     info!("Completed walk round {} step(s)", round);
 
     for peer in peers.iter_mut() {
-        // TODO: Remove if this function has no side effects
-        let _ = get_blocks_inventory(peer, 0, num_burn_blocks);
-
         let availability = get_peer_availability(
             peer,
             first_stacks_block_height - first_sortition_height,
diff --git a/testnet/stacks-node/src/tests/nakamoto_integrations.rs b/testnet/stacks-node/src/tests/nakamoto_integrations.rs
index fb305c72d2..b6c2eb372c 100644
--- a/testnet/stacks-node/src/tests/nakamoto_integrations.rs
+++ b/testnet/stacks-node/src/tests/nakamoto_integrations.rs
@@ -3025,8 +3025,6 @@ fn block_proposal_api_endpoint() {
     let http_origin = format!("http://{}", &conf.node.rpc_bind);
     let path = format!("{http_origin}/v3/block_proposal");
 
-    // Clippy thinks this is unused, but it seems to be holding a lock
-    #[allow(clippy::collection_is_never_read)]
     let mut hold_proposal_mutex = Some(test_observer::PROPOSAL_RESPONSES.lock().unwrap());
     for (ix, (test_description, block_proposal, expected_http_code, _)) in
         test_cases.iter().enumerate()
@@ -3084,7 +3082,7 @@ fn block_proposal_api_endpoint() {
 
         if ix == 1 {
             // release the test observer mutex so that the handler from 0 can finish!
-            hold_proposal_mutex.take();
+            _ = hold_proposal_mutex.take();
         }
     }