From 3f77f1b17cd1a108979760a05f1a41a0b5aae4d4 Mon Sep 17 00:00:00 2001 From: Josh Rickmar Date: Fri, 19 Jan 2024 21:46:10 +0000 Subject: [PATCH] Remove unused fadded parameter from spv rescan method The fadded parameter recorded additional outpoints added to the version 1 cfilters during a rescan, so later blocks would be rescanned with the correct filter set. Version 2 compact filters only match address scripts, not outpoints, which led this parameter to be effectively unused, and all logic that checked for added entries to never run. Remove it and clean up the calling code. No functional change intended. --- spv/backend.go | 179 ++++++++++++++++++++++--------------------------- spv/rescan.go | 19 +++--- spv/sync.go | 134 +++++++++++++++++------------------- 3 files changed, 152 insertions(+), 180 deletions(-) diff --git a/spv/backend.go b/spv/backend.go index 8c834b860..da20e6c4a 100644 --- a/spv/backend.go +++ b/spv/backend.go @@ -1,4 +1,4 @@ -// Copyright (c) 2018-2019 The Decred developers +// Copyright (c) 2018-2024 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. @@ -443,123 +443,108 @@ func (s *Syncer) Rescan(ctx context.Context, blockHashes []chainhash.Hash, save blockMatches := make([]*wire.MsgBlock, len(blockHashes)) // Block assigned to slice once fetched - // Read current filter data. filterData is reassinged to new data matches - // for subsequent filter checks, which improves filter matching performance - // by checking for less data. + // Read current filter data. s.filterMu.Lock() filterData := s.filterData s.filterMu.Unlock() - idx := 0 -FilterLoop: - for idx < len(blockHashes) { - var fmatches []*chainhash.Hash - var fmatchidx []int - var fmatchMu sync.Mutex - - // Spawn ncpu workers to check filter matches - ncpu := runtime.NumCPU() - c := make(chan int, ncpu) - var wg sync.WaitGroup - wg.Add(ncpu) - for i := 0; i < ncpu; i++ { - go func() { - for i := range c { - blockHash := &blockHashes[i] - key := cfilterKeys[i] - f := cfilters[i] - if f.MatchAny(key, filterData) { - fmatchMu.Lock() - fmatches = append(fmatches, blockHash) - fmatchidx = append(fmatchidx, i) - fmatchMu.Unlock() - } + var fmatches []*chainhash.Hash + var fmatchidx []int + var fmatchMu sync.Mutex + + // Spawn ncpu workers to check filter matches + ncpu := runtime.NumCPU() + c := make(chan int, ncpu) + var wg sync.WaitGroup + wg.Add(ncpu) + for i := 0; i < ncpu; i++ { + go func() { + for i := range c { + blockHash := &blockHashes[i] + key := cfilterKeys[i] + f := cfilters[i] + if f.MatchAny(key, filterData) { + fmatchMu.Lock() + fmatches = append(fmatches, blockHash) + fmatchidx = append(fmatchidx, i) + fmatchMu.Unlock() } - wg.Done() - }() - } - for i := idx; i < len(blockHashes); i++ { - if blockMatches[i] != nil { - // Already fetched this block - continue } - c <- i + wg.Done() + }() + } + for i := 0; i < len(blockHashes); i++ { + if blockMatches[i] != nil { + // Already fetched this block + continue } - close(c) - wg.Wait() + c <- i + } + close(c) + wg.Wait() - if len(fmatches) != 0 { - PickPeer: - for { - if err := ctx.Err(); err != nil { - return err - } - rp, err := s.waitForRemote(ctx, pickAny, true) + if len(fmatches) != 0 { + PickPeer: + for { + if err := ctx.Err(); err != nil { + return err + } + rp, err := s.waitForRemote(ctx, pickAny, true) + if err != nil { + return err + } + + blocks, err := rp.Blocks(ctx, fmatches) + if err != nil { + continue PickPeer + } + + for j, b := range blocks { + // Validate fetched blocks 
before rescanning transactions. PoW + // and PoS difficulties have already been validated since the + // header is saved by the wallet, and modifications to these in + // the downloaded block would result in a different block hash + // and failure to fetch the block. + // + // Block filters were also validated + // against the header (assuming dcp0005 + // was activated). + err = validate.MerkleRoots(b) if err != nil { - return err + err = validate.DCP0005MerkleRoot(b) } - - blocks, err := rp.Blocks(ctx, fmatches) if err != nil { + err := errors.E(op, err) + rp.Disconnect(err) + rp = nil continue PickPeer } - for j, b := range blocks { - // Validate fetched blocks before rescanning transactions. PoW - // and PoS difficulties have already been validated since the - // header is saved by the wallet, and modifications to these in - // the downloaded block would result in a different block hash - // and failure to fetch the block. - // - // Block filters were also validated - // against the header (assuming dcp0005 - // was activated). - err = validate.MerkleRoots(b) - if err != nil { - err = validate.DCP0005MerkleRoot(b) - } - if err != nil { - err := errors.E(op, err) - rp.Disconnect(err) - rp = nil - continue PickPeer - } - - i := fmatchidx[j] - blockMatches[i] = b - } - break + i := fmatchidx[j] + blockMatches[i] = b } + break } + } - for i := idx; i < len(blockMatches); i++ { - b := blockMatches[i] - if b == nil { - // No filter match, skip block - continue - } - - if err := ctx.Err(); err != nil { - return err - } + for i := 0; i < len(blockMatches); i++ { + b := blockMatches[i] + if b == nil { + // No filter match, skip block + continue + } - matchedTxs, fadded := s.rescanBlock(b) - if len(matchedTxs) != 0 { - err := save(&blockHashes[i], matchedTxs) - if err != nil { - return err - } + if err := ctx.Err(); err != nil { + return err + } - // Check for more matched blocks using updated filters, - // starting at the next block. - if len(fadded) != 0 { - idx = i + 1 - filterData = fadded - continue FilterLoop - } + matchedTxs := s.rescanBlock(b) + if len(matchedTxs) != 0 { + err := save(&blockHashes[i], matchedTxs) + if err != nil { + return err } } - return nil } return nil diff --git a/spv/rescan.go b/spv/rescan.go index 030be3211..51ba69e64 100644 --- a/spv/rescan.go +++ b/spv/rescan.go @@ -1,5 +1,5 @@ // Copyright (c) 2013-2014 The btcsuite developers -// Copyright (c) 2018-2021 The Decred developers +// Copyright (c) 2018-2024 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. @@ -7,7 +7,6 @@ package spv import ( "github.com/decred/dcrd/blockchain/stake/v5" - "github.com/decred/dcrd/gcs/v4/blockcf2" "github.com/decred/dcrd/txscript/v4/stdscript" "github.com/decred/dcrd/wire" ) @@ -15,11 +14,10 @@ import ( // rescanCheckTransaction is a helper function to rescan both stake and regular // transactions in a block. It appends transactions that match the filters to // *matches, while updating the filters to add outpoints for new UTXOs -// controlled by this wallet. New data added to the Syncer's filters is also -// added to fadded. +// controlled by this wallet. // // This function may only be called with the filter mutex held. 
-func (s *Syncer) rescanCheckTransactions(matches *[]*wire.MsgTx, fadded *blockcf2.Entries, txs []*wire.MsgTx, tree int8) { +func (s *Syncer) rescanCheckTransactions(matches *[]*wire.MsgTx, txs []*wire.MsgTx, tree int8) { for i, tx := range txs { // Keep track of whether the transaction has already been added // to the result. It shouldn't be added twice. @@ -78,14 +76,13 @@ func (s *Syncer) rescanCheckTransactions(matches *[]*wire.MsgTx, fadded *blockcf } // rescanBlock rescans a block for any relevant transactions for the passed -// lookup keys. Returns any discovered transactions and any new data added to -// the filter. -func (s *Syncer) rescanBlock(block *wire.MsgBlock) (matches []*wire.MsgTx, fadded blockcf2.Entries) { +// lookup keys. Returns any discovered transactions. +func (s *Syncer) rescanBlock(block *wire.MsgBlock) (matches []*wire.MsgTx) { s.filterMu.Lock() - s.rescanCheckTransactions(&matches, &fadded, block.STransactions, wire.TxTreeStake) - s.rescanCheckTransactions(&matches, &fadded, block.Transactions, wire.TxTreeRegular) + s.rescanCheckTransactions(&matches, block.STransactions, wire.TxTreeStake) + s.rescanCheckTransactions(&matches, block.Transactions, wire.TxTreeRegular) s.filterMu.Unlock() - return matches, fadded + return matches } // filterRelevant filters out all transactions considered irrelevant diff --git a/spv/sync.go b/spv/sync.go index 52533f592..f3862fb85 100644 --- a/spv/sync.go +++ b/spv/sync.go @@ -1,4 +1,4 @@ -// Copyright (c) 2018-2021 The Decred developers +// Copyright (c) 2018-2024 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. @@ -1003,90 +1003,80 @@ func (s *Syncer) scanChain(ctx context.Context, rp *p2p.RemotePeer, chain []*wal } } - idx := 0 -FilterLoop: - for idx < len(chain) { - var fmatches []*chainhash.Hash - var fmatchidx []int - var fmatchMu sync.Mutex - - // Scan remaining filters with up to ncpu workers - c := make(chan int) - var wg sync.WaitGroup - worker := func() { - for i := range c { - n := chain[i] - f := n.FilterV2 - k := blockcf2.Key(&n.Header.MerkleRoot) - if f.N() != 0 && f.MatchAny(k, filterData) { - fmatchMu.Lock() - fmatches = append(fmatches, n.Hash) - fmatchidx = append(fmatchidx, i) - fmatchMu.Unlock() - } + var fmatches []*chainhash.Hash + var fmatchidx []int + var fmatchMu sync.Mutex + + // Scan filters with up to ncpu workers + c := make(chan int) + var wg sync.WaitGroup + worker := func() { + for i := range c { + n := chain[i] + f := n.FilterV2 + k := blockcf2.Key(&n.Header.MerkleRoot) + if f.N() != 0 && f.MatchAny(k, filterData) { + fmatchMu.Lock() + fmatches = append(fmatches, n.Hash) + fmatchidx = append(fmatchidx, i) + fmatchMu.Unlock() } - wg.Done() } - nworkers := 0 - for i := idx; i < len(chain); i++ { - if fetched[i] != nil { - continue // Already have block - } - select { - case c <- i: - default: - if nworkers < runtime.NumCPU() { - nworkers++ - wg.Add(1) - go worker() - } - c <- i + wg.Done() + } + nworkers := 0 + for i := 0; i < len(chain); i++ { + if fetched[i] != nil { + continue // Already have block + } + select { + case c <- i: + default: + if nworkers < runtime.NumCPU() { + nworkers++ + wg.Add(1) + go worker() } + c <- i + } + } + close(c) + wg.Wait() + + if len(fmatches) != 0 { + blocks, err := rp.Blocks(ctx, fmatches) + if err != nil { + return nil, err } - close(c) - wg.Wait() + for j, b := range blocks { + i := fmatchidx[j] - if len(fmatches) != 0 { - blocks, err := rp.Blocks(ctx, fmatches) + // Perform context-free 
validation on the block. + // Disconnect peer when invalid. + err := validate.MerkleRoots(b) if err != nil { + err = validate.DCP0005MerkleRoot(b) + } + if err != nil { + rp.Disconnect(err) return nil, err } - for j, b := range blocks { - i := fmatchidx[j] - // Perform context-free validation on the block. - // Disconnect peer when invalid. - err := validate.MerkleRoots(b) - if err != nil { - err = validate.DCP0005MerkleRoot(b) - } - if err != nil { - rp.Disconnect(err) - return nil, err - } - - fetched[i] = b - } + fetched[i] = b } + } - if err := ctx.Err(); err != nil { - return nil, err - } + if err := ctx.Err(); err != nil { + return nil, err + } - for i := idx; i < len(chain); i++ { - b := fetched[i] - if b == nil { - continue - } - matches, fadded := s.rescanBlock(b) - found[*chain[i].Hash] = matches - if len(fadded) != 0 { - idx = i + 1 - filterData = fadded - continue FilterLoop - } + for i := 0; i < len(chain); i++ { + b := fetched[i] + if b == nil { + continue } - return found, nil + matches := s.rescanBlock(b) + found[*chain[i].Hash] = matches } return found, nil }
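
For context, the per-block flow that remains in (*Syncer).Rescan after this change can be summarized as follows. This is an illustrative sketch rather than code from the patch: peer selection, filter matching, and block validation are elided, the helper name rescanMatched is hypothetical, and only rescanBlock and save keep the signatures shown in the diff.

    // Illustrative sketch only (not part of the patch): the per-block rescan
    // flow left in backend.go once fadded is removed. rescanMatched is a
    // hypothetical name for this extracted loop.
    func rescanMatched(ctx context.Context, s *Syncer, blockHashes []chainhash.Hash,
            blockMatches []*wire.MsgBlock,
            save func(*chainhash.Hash, []*wire.MsgTx) error) error {

            for i := range blockMatches {
                    b := blockMatches[i] // nil when the block's v2 cfilter did not match
                    if b == nil {
                            continue
                    }
                    if err := ctx.Err(); err != nil {
                            return err
                    }
                    // Single return value after this patch; there is no fadded
                    // filter data to feed back into a second matching pass over
                    // later blocks.
                    matchedTxs := s.rescanBlock(b)
                    if len(matchedTxs) == 0 {
                            continue
                    }
                    if err := save(&blockHashes[i], matchedTxs); err != nil {
                            return err
                    }
            }
            return nil
    }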
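
The reason no filter feedback is needed is visible in scanChain above: the version 2 cfilter is keyed on the block's merkle root and probed with the wallet's address scripts only, so a match never yields new outpoint entries to add. A minimal sketch of that match, reusing the names from the patch (n, filterData, fmatches, fmatchidx), is:

    // Illustrative: version 2 cfilter matching during a rescan. filterData is
    // the wallet's watched script set (blockcf2.Entries); nothing is ever
    // appended to it as a result of a match, which is why the fadded return
    // value and the outer FilterLoop retry could be deleted.
    k := blockcf2.Key(&n.Header.MerkleRoot)
    if n.FilterV2.N() != 0 && n.FilterV2.MatchAny(k, filterData) {
            fmatches = append(fmatches, n.Hash)
            fmatchidx = append(fmatchidx, i)
    }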