From 4421d1fec8d463793976326c7349f130e8e732ef Mon Sep 17 00:00:00 2001
From: Mariusz Reichert
Date: Wed, 15 Jan 2025 10:26:36 +0100
Subject: [PATCH] Instrumented macro refactoring

---
 Cargo.lock                  | 20 +++---
 Cargo.toml                  | 16 ++---
 .../Cargo.toml              |  5 +-
 .../src/lib.rs              | 15 +++-
 src/bin/electrs.rs          |  2 +-
 src/daemon.rs               | 68 +++++++++----------
 src/electrum/server.rs      | 18 ++---
 src/lib.rs                  |  6 --
 src/new_index/fetch.rs      | 14 ++--
 src/new_index/mempool.rs    | 41 ++++++-----
 src/new_index/precache.rs   |  6 +-
 src/new_index/query.rs      | 36 +++++-----
 src/rest.rs                 |  6 +-
 src/util/block.rs           | 12 ++--
 src/util/electrum_merkle.rs |  8 +--
 src/util/fees.rs            |  4 +-
 16 files changed, 140 insertions(+), 137 deletions(-)
 rename {instrumented_macro => electrs_macros}/Cargo.toml (70%)
 rename {instrumented_macro => electrs_macros}/src/lib.rs (64%)

diff --git a/Cargo.lock b/Cargo.lock
index 31b464ebd..23cdb345c 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -900,6 +900,7 @@ dependencies = [
  "criterion",
  "crossbeam-channel",
  "dirs",
+ "electrs_macros",
  "electrum-client",
  "electrumd",
  "elements",
@@ -908,7 +909,6 @@ dependencies = [
  "glob",
  "hyper",
  "hyperlocal",
- "instrumented_macro",
  "itertools 0.12.1",
  "lazy_static",
  "libc",
@@ -941,6 +941,15 @@ dependencies = [
  "zmq",
 ]
 
+[[package]]
+name = "electrs_macros"
+version = "0.1.0"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn 2.0.96",
+]
+
 [[package]]
 name = "electrum-client"
 version = "0.8.0"
@@ -1616,15 +1625,6 @@ dependencies = [
  "generic-array",
 ]
 
-[[package]]
-name = "instrumented_macro"
-version = "0.1.0"
-dependencies = [
- "proc-macro2",
- "quote",
- "syn 2.0.90",
-]
-
 [[package]]
 name = "ipnet"
 version = "2.11.0"
diff --git a/Cargo.toml b/Cargo.toml
index e012f4b5f..a0ed15da7 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -1,4 +1,5 @@
-workspace = { members = ["instrumented_macro"] }
+workspace = { members = ["electrs_macros"] }
+
 [package]
 name = "electrs"
 version = "0.4.1"
@@ -17,17 +18,14 @@ default-run = "electrs"
 liquid = ["elements"]
 electrum-discovery = ["electrum-client"]
 bench = []
-default = ["no-otlp-tracing"]
 otlp-tracing = [
-    "tracing/max_level_trace",
+    "tracing",
     "tracing-subscriber",
     "opentelemetry",
     "tracing-opentelemetry",
     "opentelemetry-otlp",
-    "opentelemetry-semantic-conventions"
-]
-no-otlp-tracing = [
-    "tracing/max_level_off"
+    "opentelemetry-semantic-conventions",
+    "electrs_macros/otlp-tracing"
 ]
 
 [dependencies]
@@ -71,12 +69,12 @@ tracing-opentelemetry = { version = "0.21.0", optional = true }
 opentelemetry-otlp = { version = "0.13.0", default-features = false, features = ["http-proto", "reqwest-client"], optional = true }
 tracing-subscriber = { version = "0.3.17", default-features = false, features = ["env-filter", "fmt"], optional = true }
 opentelemetry-semantic-conventions = { version = "0.12.0", optional = true }
-tracing = { version = "0.1.40", default-features = false, features = ["attributes"] }
+tracing = { version = "0.1.40", default-features = false, features = ["attributes"], optional = true }
 
 # optional dependencies for electrum-discovery
 electrum-client = { version = "0.8", optional = true }
 zmq = "0.10.0"
-instrumented_macro = { path = "instrumented_macro" }
+electrs_macros = { path = "electrs_macros", default-features = false }
 
 [dev-dependencies]
 bitcoind = { version = "0.36", features = ["25_0"] }
diff --git a/instrumented_macro/Cargo.toml b/electrs_macros/Cargo.toml
similarity index 70%
rename from instrumented_macro/Cargo.toml
rename to electrs_macros/Cargo.toml
index a97eebf1a..607d53a02 100644
--- a/instrumented_macro/Cargo.toml
+++ b/electrs_macros/Cargo.toml
@@ -1,11 +1,14 @@
 [package]
-name = "instrumented_macro"
+name = "electrs_macros"
 version = "0.1.0"
 edition = "2021"
 
 [lib]
 proc-macro = true
 
+[features]
+otlp-tracing = []
+
 [dependencies]
 syn = "2.0"
 quote = "1.0"
diff --git a/instrumented_macro/src/lib.rs b/electrs_macros/src/lib.rs
similarity index 64%
rename from instrumented_macro/src/lib.rs
rename to electrs_macros/src/lib.rs
index 543f25165..c8cb8447e 100644
--- a/instrumented_macro/src/lib.rs
+++ b/electrs_macros/src/lib.rs
@@ -1,9 +1,12 @@
 use proc_macro::TokenStream;
-use quote::quote;
-use syn::{parse_macro_input, ItemFn};
+
 
 #[proc_macro_attribute]
-pub fn instrumented(attr: TokenStream, item: TokenStream) -> TokenStream {
+#[cfg(feature = "otlp-tracing")]
+pub fn trace(attr: TokenStream, item: TokenStream) -> TokenStream {
+    use quote::quote;
+    use syn::{parse_macro_input, ItemFn};
+
     let additional_fields = if !attr.is_empty() {
         let attr_tokens: proc_macro2::TokenStream = attr.into();
         quote! {, #attr_tokens }
     } else {
         quote! {}
     };
@@ -23,4 +26,10 @@ pub fn instrumented(attr: TokenStream, item: TokenStream) -> TokenStream {
     };
 
     expanded.into()
+}
+
+#[proc_macro_attribute]
+#[cfg(not(feature = "otlp-tracing"))]
+pub fn trace(_attr: TokenStream, item: TokenStream) -> TokenStream {
+    item
 }
\ No newline at end of file
diff --git a/src/bin/electrs.rs b/src/bin/electrs.rs
index 31e2aaf9b..42f5cf024 100644
--- a/src/bin/electrs.rs
+++ b/src/bin/electrs.rs
@@ -159,7 +159,7 @@ fn main_() {
     }
 }
 
-#[cfg(feature = "no-otlp-tracing")]
+#[cfg(not(feature = "otlp-tracing"))]
 fn main() {
     main_();
 }
diff --git a/src/daemon.rs b/src/daemon.rs
index c84e5d0c9..84b93ac03 100644
--- a/src/daemon.rs
+++ b/src/daemon.rs
@@ -20,7 +20,7 @@ use bitcoin::consensus::encode::{deserialize, serialize_hex};
 #[cfg(feature = "liquid")]
 use elements::encode::{deserialize, serialize_hex};
 
-use instrumented_macro::instrumented;
+use electrs_macros::trace;
 
 use crate::chain::{Block, BlockHash, BlockHeader, Network, Transaction, Txid};
 use crate::metrics::{HistogramOpts, HistogramVec, Metrics};
@@ -44,7 +44,7 @@ lazy_static! {
 const MAX_ATTEMPTS: u32 = 5;
 const RETRY_WAIT_DURATION: Duration = Duration::from_secs(1);
 
-#[instrumented]
+#[trace]
 fn parse_hash<T>(value: &Value) -> Result<T>
 where
     T: FromStr,
@@ -58,7 +58,7 @@ where
     .chain_err(|| format!("non-hex value: {}", value))?)
 }
 
-#[instrumented]
+#[trace]
 fn header_from_value(value: Value) -> Result<BlockHeader> {
     let header_hex = value
         .as_str()
@@ -153,7 +153,7 @@ struct Connection {
     signal: Waiter,
 }
 
-#[instrumented]
+#[trace]
 fn tcp_connect(addr: SocketAddr, signal: &Waiter) -> Result<TcpStream> {
     loop {
         match TcpStream::connect_timeout(&addr, *DAEMON_CONNECTION_TIMEOUT) {
@@ -176,7 +176,7 @@ fn tcp_connect(addr: SocketAddr, signal: &Waiter) -> Result<TcpStream> {
 }
 
 impl Connection {
-    #[instrumented]
+    #[trace]
     fn new(
         addr: SocketAddr,
         cookie_getter: Arc<dyn CookieGetter>,
@@ -196,12 +196,12 @@ impl Connection {
         })
     }
 
-    #[instrumented]
+    #[trace]
     fn reconnect(&self) -> Result<Connection> {
         Connection::new(self.addr, self.cookie_getter.clone(), self.signal.clone())
     }
 
-    #[instrumented]
+    #[trace]
     fn send(&mut self, request: &str) -> Result<()> {
         let cookie = &self.cookie_getter.get()?;
         let msg = format!(
@@ -215,7 +215,7 @@ impl Connection {
         })
     }
 
-    #[instrumented]
+    #[trace]
     fn recv(&mut self) -> Result<String> {
         // TODO: use proper HTTP parser.
         let mut in_header = true;
@@ -381,7 +381,7 @@ impl Daemon {
         Ok(daemon)
     }
 
-    #[instrumented]
+    #[trace]
     pub fn reconnect(&self) -> Result<Daemon> {
         Ok(Daemon {
             daemon_dir: self.daemon_dir.clone(),
@@ -396,7 +396,7 @@ impl Daemon {
         })
     }
 
-    #[instrumented]
+    #[trace]
     pub fn list_blk_files(&self) -> Result<Vec<PathBuf>> {
         let path = self.blocks_dir.join("blk*.dat");
         debug!("listing block files at {:?}", path);
@@ -432,7 +432,7 @@ impl Daemon {
         self.network.magic()
     }
 
-    #[instrumented]
+    #[trace]
     fn call_jsonrpc(&self, method: &str, request: &Value) -> Result<Value> {
         let mut conn = self.conn.lock().unwrap();
         let timer = self.latency.with_label_values(&[method]).start_timer();
@@ -450,7 +450,7 @@ impl Daemon {
         Ok(result)
     }
 
-    #[instrumented(method = %method)]
+    #[trace(method = %method)]
     fn handle_request(&self, method: &str, params: &Value) -> Result<Value> {
         let id = self.message_id.next();
         let req = json!({"method": method, "params": params, "id": id});
@@ -473,12 +473,12 @@ impl Daemon {
         }
     }
 
-    #[instrumented]
+    #[trace]
     fn request(&self, method: &str, params: Value) -> Result<Value> {
         self.retry_request(method, &params)
     }
 
-    #[instrumented]
+    #[trace]
    fn retry_reconnect(&self) -> Daemon {
         // XXX add a max reconnection attempts limit?
         loop {
@@ -493,14 +493,14 @@ impl Daemon {
 
     // Send requests in parallel over multiple RPC connections as individual JSON-RPC requests (with no JSON-RPC batching),
     // buffering the replies into a vector. If any of the requests fail, processing is terminated and an Err is returned.
-    #[instrumented]
+    #[trace]
     fn requests(&self, method: &str, params_list: Vec<Value>) -> Result<Vec<Value>> {
         self.requests_iter(method, params_list).collect()
     }
 
     // Send requests in parallel over multiple RPC connections, iterating over the results without buffering them.
     // Errors are included in the iterator and do not terminate other pending requests.
-    #[instrumented]
+    #[trace]
     fn requests_iter<'a>(
         &'a self,
         method: &'a str,
@@ -523,29 +523,29 @@ impl Daemon {
 
     // bitcoind JSONRPC API:
 
-    #[instrumented]
+    #[trace]
     pub fn getblockchaininfo(&self) -> Result<BlockchainInfo> {
         let info: Value = self.request("getblockchaininfo", json!([]))?;
         Ok(from_value(info).chain_err(|| "invalid blockchain info")?)
     }
 
-    #[instrumented]
+    #[trace]
     fn getnetworkinfo(&self) -> Result<NetworkInfo> {
         let info: Value = self.request("getnetworkinfo", json!([]))?;
         Ok(from_value(info).chain_err(|| "invalid network info")?)
     }
 
-    #[instrumented]
+    #[trace]
     pub fn getbestblockhash(&self) -> Result<BlockHash> {
         parse_hash(&self.request("getbestblockhash", json!([]))?)
     }
 
-    #[instrumented]
+    #[trace]
     pub fn getblockheader(&self, blockhash: &BlockHash) -> Result<BlockHeader> {
         header_from_value(self.request("getblockheader", json!([blockhash, /*verbose=*/ false]))?)
     }
 
-    #[instrumented]
+    #[trace]
     pub fn getblockheaders(&self, heights: &[usize]) -> Result<Vec<BlockHeader>> {
         let heights: Vec<Value> = heights.iter().map(|height| json!([height])).collect();
         let params_list: Vec<Value> = self
@@ -560,7 +560,7 @@ impl Daemon {
         Ok(result)
     }
 
-    #[instrumented]
+    #[trace]
     pub fn getblock(&self, blockhash: &BlockHash) -> Result<Block> {
         let block =
             block_from_value(self.request("getblock", json!([blockhash, /*verbose=*/ false]))?)?;
@@ -568,12 +568,12 @@ impl Daemon {
         Ok(block)
     }
 
-    #[instrumented]
+    #[trace]
     pub fn getblock_raw(&self, blockhash: &BlockHash, verbose: u32) -> Result<Value> {
         self.request("getblock", json!([blockhash, verbose]))
     }
 
-    #[instrumented]
+    #[trace]
     pub fn getblocks(&self, blockhashes: &[BlockHash]) -> Result<Vec<Block>> {
         let params_list: Vec<Value> = blockhashes
             .iter()
@@ -610,7 +610,7 @@ impl Daemon {
 
     /// Fetch the given transactions in parallel over multiple threads and RPC connections,
     /// ignoring any missing ones and returning whatever is available.
-    #[instrumented]
+    #[trace]
     pub fn gettransactions_available(&self, txids: &[&Txid]) -> Result<Vec<Transaction>> {
         const RPC_INVALID_ADDRESS_OR_KEY: i64 = -5;
 
@@ -635,7 +635,7 @@ impl Daemon {
             .collect()
     }
 
-    #[instrumented]
+    #[trace]
     pub fn gettransaction_raw(
         &self,
         txid: &Txid,
@@ -645,24 +645,24 @@ impl Daemon {
         self.request("getrawtransaction", json!([txid, verbose, blockhash]))
     }
 
-    #[instrumented]
+    #[trace]
     pub fn getmempooltx(&self, txhash: &Txid) -> Result<Transaction> {
         let value = self.request("getrawtransaction", json!([txhash, /*verbose=*/ false]))?;
         tx_from_value(value)
     }
 
-    #[instrumented]
+    #[trace]
     pub fn getmempooltxids(&self) -> Result<HashSet<Txid>> {
         let res = self.request("getrawmempool", json!([/*verbose=*/ false]))?;
         Ok(serde_json::from_value(res).chain_err(|| "invalid getrawmempool reply")?)
     }
 
-    #[instrumented]
+    #[trace]
     pub fn broadcast(&self, tx: &Transaction) -> Result<Txid> {
         self.broadcast_raw(&serialize_hex(tx))
     }
 
-    #[instrumented]
+    #[trace]
     pub fn broadcast_raw(&self, txhex: &str) -> Result<Txid> {
         let txid = self.request("sendrawtransaction", json!([txhex]))?;
         Ok(
@@ -674,7 +674,7 @@ impl Daemon {
     // Get estimated feerates for the provided confirmation targets using a batch RPC request
     // Missing estimates are logged but do not cause a failure, whatever is available is returned
     #[allow(clippy::float_cmp)]
-    #[instrumented]
+    #[trace]
     pub fn estimatesmartfee_batch(&self, conf_targets: &[u16]) -> Result<HashMap<u16, f64>> {
         let params_list: Vec<Value> = conf_targets
             .iter()
@@ -709,7 +709,7 @@ impl Daemon {
             .collect())
     }
 
-    #[instrumented]
+    #[trace]
     fn get_all_headers(&self, tip: &BlockHash) -> Result<Vec<BlockHeader>> {
         let info: Value = self.request("getblockheader", json!([tip]))?;
         let tip_height = info
@@ -737,7 +737,7 @@ impl Daemon {
     }
 
     // Returns a list of BlockHeaders in ascending height (i.e. the tip is last).
-    #[instrumented]
+    #[trace]
     pub fn get_new_headers(
         &self,
         indexed_headers: &HeaderList,
@@ -770,7 +770,7 @@ impl Daemon {
         Ok(new_headers)
     }
 
-    #[instrumented]
+    #[trace]
     pub fn get_relayfee(&self) -> Result<f64> {
         let relayfee = self.getnetworkinfo()?.relayfee;
 
diff --git a/src/electrum/server.rs b/src/electrum/server.rs
index d86ca4160..d40eb9767 100644
--- a/src/electrum/server.rs
+++ b/src/electrum/server.rs
@@ -13,7 +13,7 @@ use crypto::sha2::Sha256;
 use error_chain::ChainedError;
 use serde_json::{from_str, Value};
 
-use instrumented_macro::instrumented;
+use electrs_macros::trace;
 
 #[cfg(not(feature = "liquid"))]
 use bitcoin::consensus::encode::serialize_hex;
@@ -71,7 +71,7 @@ fn bool_from_value_or(val: Option<&Value>, name: &str, default: bool) -> Result<
 }
 
 // TODO: implement caching and delta updates
-#[instrumented]
+#[trace]
 fn get_status_hash(txs: Vec<(Txid, Option<BlockId>)>, query: &Query) -> Option<FullHash> {
     if txs.is_empty() {
         None
@@ -264,7 +264,7 @@ impl Connection {
         }))
     }
 
-    #[instrumented]
+    #[trace]
     fn blockchain_estimatefee(&self, params: &[Value]) -> Result<Value> {
         let conf_target = usize_from_value(params.get(0), "blocks_count")?;
         let fee_rate = self
@@ -392,7 +392,7 @@ impl Connection {
         Ok(json!(rawtx.to_lower_hex_string()))
     }
 
-    #[instrumented]
+    #[trace]
     fn blockchain_transaction_get_merkle(&self, params: &[Value]) -> Result<Value> {
         let txid = Txid::from(hash_from_value(params.get(0)).chain_err(|| "bad tx_hash")?);
         let height = usize_from_value(params.get(1), "height")?;
@@ -430,7 +430,7 @@ impl Connection {
         }))
     }
 
-    #[instrumented(method = %method)]
+    #[trace(method = %method)]
     fn handle_command(&mut self, method: &str, params: &[Value], id: &Value) -> Result<Value> {
         let timer = self
             .stats
@@ -487,7 +487,7 @@ impl Connection {
         })
     }
 
-    #[instrumented]
+    #[trace]
     fn update_subscriptions(&mut self) -> Result<Vec<Value>> {
         let timer = self
             .stats
@@ -545,7 +545,7 @@ impl Connection {
         Ok(())
     }
 
-    #[instrumented]
+    #[trace]
     fn handle_replies(&mut self, receiver: Receiver<Message>) -> Result<()> {
         let empty_params = json!([]);
         loop {
@@ -610,7 +610,7 @@ impl Connection {
         }
     }
 
-    #[instrumented]
+    #[trace]
     fn parse_requests(mut reader: BufReader<TcpStream>, tx: &SyncSender<Message>) -> Result<()> {
         loop {
             let mut line = Vec::<u8>::new();
@@ -673,7 +673,7 @@ impl Connection {
     }
 }
 
-#[instrumented]
+#[trace]
 fn get_history(
     query: &Query,
     scripthash: &[u8],
diff --git a/src/lib.rs b/src/lib.rs
index 1bdad259e..af632d973 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -29,11 +29,5 @@ pub mod util;
 #[cfg(feature = "liquid")]
 pub mod elements;
 
-#[cfg(not(any(feature = "otlp-tracing", feature = "no-otlp-tracing")))]
-compile_error!("Must enable one of the 'otlp-tracing' or 'no-otlp-tracing' features");
-
-#[cfg(all(feature = "otlp-tracing", feature = "no-otlp-tracing"))]
-compile_error!("Cannot enable both the 'otlp-tracing' and 'no-otlp-tracing' (default) features");
-
 #[cfg(feature = "otlp-tracing")]
 pub mod otlp_trace;
diff --git a/src/new_index/fetch.rs b/src/new_index/fetch.rs
index d7aaae5fb..fd92ffedc 100644
--- a/src/new_index/fetch.rs
+++ b/src/new_index/fetch.rs
@@ -14,7 +14,7 @@ use std::path::PathBuf;
 use std::sync::mpsc::Receiver;
 use std::thread;
 
-use instrumented_macro::instrumented;
+use electrs_macros::trace;
 
 use crate::chain::{Block, BlockHash};
 use crate::daemon::Daemon;
@@ -27,7 +27,7 @@ pub enum FetchFrom {
     BlkFiles,
 }
 
-#[instrumented]
+#[trace]
 pub fn start_fetcher(
     from: FetchFrom,
     daemon: &Daemon,
@@ -70,7 +70,7 @@ impl Fetcher {
     }
 }
 
-#[instrumented]
+#[trace]
 fn bitcoind_fetcher(
     daemon: &Daemon,
     new_headers: Vec<HeaderEntry>,
@@ -109,7 +109,7 @@ fn bitcoind_fetcher(
     ))
 }
 
-#[instrumented]
+#[trace]
 fn blkfiles_fetcher(
     daemon: &Daemon,
     new_headers: Vec<HeaderEntry>,
@@ -157,7 +157,7 @@ fn blkfiles_fetcher(
     ))
 }
 
-#[instrumented]
+#[trace]
 fn blkfiles_reader(blk_files: Vec<PathBuf>, xor_key: Option<[u8; 8]>) -> Fetcher<Vec<u8>> {
     let chan = SyncChannel::new(1);
     let sender = chan.sender();
@@ -188,7 +188,7 @@ fn blkfile_apply_xor_key(xor_key: [u8; 8], blob: &mut [u8]) {
     }
 }
 
-#[instrumented]
+#[trace]
 fn blkfiles_parser(blobs: Fetcher<Vec<u8>>, magic: u32) -> Fetcher<Vec<SizedBlock>> {
     let chan = SyncChannel::new(1);
     let sender = chan.sender();
@@ -207,7 +207,7 @@ fn blkfiles_parser(blobs: Fetcher<Vec<u8>>, magic: u32) -> Fetcher<Vec<SizedBlo
 
-#[instrumented]
+#[trace]
 fn parse_blocks(blob: Vec<u8>, magic: u32) -> Result<Vec<SizedBlock>> {
     let mut cursor = Cursor::new(&blob);
     let mut slices = vec![];
diff --git a/src/new_index/mempool.rs b/src/new_index/mempool.rs
index 3d1b328f2..57a5fa5d8 100644
--- a/src/new_index/mempool.rs
+++ b/src/new_index/mempool.rs
@@ -11,7 +11,7 @@ use std::iter::FromIterator;
 use std::sync::{Arc, RwLock};
 use std::time::{Duration, Instant};
 
-use instrumented_macro::instrumented;
+use electrs_macros::trace;
 
 use crate::chain::{deserialize, BlockHash, Network, OutPoint, Transaction, TxOut, Txid};
 use crate::config::Config;
 use crate::daemon::Daemon;
@@ -108,7 +108,7 @@ impl Mempool {
         self.txstore.get(txid).map(serialize)
     }
 
-    #[instrumented]
+    #[trace]
     pub fn lookup_spend(&self, outpoint: &OutPoint) -> Option<SpendingInput> {
         self.edges.get(outpoint).map(|(txid, vin)| SpendingInput {
             txid: *txid,
@@ -125,7 +125,7 @@ impl Mempool {
         Some(self.feeinfo.get(txid)?.fee)
     }
 
-    #[instrumented]
+    #[trace]
     pub fn has_unconfirmed_parents(&self, txid: &Txid) -> bool {
         let tx = match self.txstore.get(txid) {
             Some(tx) => tx,
@@ -136,7 +136,7 @@ impl Mempool {
             .any(|txin| self.txstore.contains_key(&txin.previous_output.txid))
     }
 
-    #[instrumented]
+    #[trace]
     pub fn history(&self, scripthash: &[u8], limit: usize) -> Vec<Transaction> {
         let _timer = self.latency.with_label_values(&["history"]).start_timer();
         self.history
@@ -144,7 +144,7 @@ impl Mempool {
             .map_or_else(|| vec![], |entries| self._history(entries, limit))
     }
 
-    #[instrumented]
+    #[trace]
     fn _history(&self, entries: &[TxHistoryInfo], limit: usize) -> Vec<Transaction> {
         entries
             .iter()
@@ -156,7 +156,7 @@ impl Mempool {
             .collect()
     }
 
-    #[instrumented]
+    #[trace]
     pub fn history_txids(&self, scripthash: &[u8], limit: usize) -> Vec<Txid> {
         let _timer = self
             .latency
@@ -173,7 +173,7 @@ impl Mempool {
         }
     }
 
-    #[instrumented]
+    #[trace]
     pub fn utxo(&self, scripthash: &[u8]) -> Vec<Utxo> {
         let _timer = self.latency.with_label_values(&["utxo"]).start_timer();
         let entries = match self.history.get(scripthash) {
@@ -216,7 +216,7 @@ impl Mempool {
             .collect()
     }
 
-    #[instrumented]
+    #[trace]
     // @XXX avoid code duplication with ChainQuery::stats()?
     pub fn stats(&self, scripthash: &[u8]) -> ScriptStats {
         let _timer = self.latency.with_label_values(&["stats"]).start_timer();
@@ -266,14 +266,14 @@ impl Mempool {
         stats
     }
 
-    #[instrumented]
+    #[trace]
     // Get all txids in the mempool
     pub fn txids(&self) -> Vec<&Txid> {
         let _timer = self.latency.with_label_values(&["txids"]).start_timer();
         self.txstore.keys().collect()
     }
 
-    #[instrumented]
+    #[trace]
     // Get an overview of the most recent transactions
     pub fn recent_txs_overview(&self) -> Vec<&TxOverview> {
         // We don't bother ever deleting elements from the recent list.
@@ -282,17 +282,17 @@ impl Mempool {
         self.recent.iter().collect()
     }
 
-    #[instrumented]
+    #[trace]
     pub fn backlog_stats(&self) -> &BacklogStats {
         &self.backlog_stats.0
     }
 
-    #[instrumented]
+    #[trace]
     pub fn txids_set(&self) -> HashSet<Txid> {
         return HashSet::from_iter(self.txstore.keys().cloned());
     }
 
-    #[instrumented]
+    #[trace]
     pub fn update_backlog_stats(&mut self) {
         let _timer = self
             .latency
@@ -301,7 +301,7 @@ impl Mempool {
         self.backlog_stats = (BacklogStats::new(&self.feeinfo), Instant::now());
     }
 
-    #[instrumented]
+    #[trace]
     pub fn add_by_txid(&mut self, daemon: &Daemon, txid: Txid) -> Result<()> {
         if self.txstore.get(&txid).is_none() {
             if let Ok(tx) = daemon.getmempooltx(&txid) {
@@ -316,7 +316,7 @@ impl Mempool {
         }
     }
 
-    #[instrumented]
+    #[trace]
     fn add(&mut self, txs_map: HashMap<Txid, Transaction>) -> Result<()> {
         self.delta
             .with_label_values(&["add"])
@@ -429,14 +429,13 @@ impl Mempool {
         Ok(())
     }
 
-    #[instrumented]
     fn lookup_txo(&self, outpoint: &OutPoint) -> Option<TxOut> {
         self.txstore
             .get(&outpoint.txid)
             .and_then(|tx| tx.output.get(outpoint.vout as usize).cloned())
     }
 
-    #[instrumented]
+    #[trace]
     pub fn lookup_txos(&self, outpoints: BTreeSet<OutPoint>) -> Result<HashMap<OutPoint, TxOut>> {
         let _timer = self
             .latency
@@ -458,7 +457,7 @@ impl Mempool {
         Ok(txos)
     }
 
-    #[instrumented]
+    #[trace]
     fn remove(&mut self, to_remove: HashSet<&Txid>) {
         self.delta
             .with_label_values(&["remove"])
@@ -494,7 +493,7 @@ impl Mempool {
     }
 
     #[cfg(feature = "liquid")]
-    #[instrumented]
+    #[trace]
     pub fn asset_history(&self, asset_id: &AssetId, limit: usize) -> Vec<Transaction> {
         let _timer = self
             .latency
@@ -507,7 +506,7 @@ impl Mempool {
 
     /// Sync our local view of the mempool with the bitcoind Daemon RPC. If the chain tip moves before
     /// the mempool is fetched in full, syncing is aborted and an Ok(false) is returned.
-    #[instrumented]
+    #[trace]
     pub fn update(
         mempool: &Arc<RwLock<Mempool>>,
         daemon: &Daemon,
@@ -619,7 +618,7 @@ impl BacklogStats {
         }
     }
 
-    #[instrumented]
+    #[trace]
     fn new(feeinfo: &HashMap<Txid, TxFeeInfo>) -> Self {
         let (count, vsize, total_fee) = feeinfo
             .values()
diff --git a/src/new_index/precache.rs b/src/new_index/precache.rs
index affef4177..ea4710431 100644
--- a/src/new_index/precache.rs
+++ b/src/new_index/precache.rs
@@ -13,9 +13,9 @@ use std::io;
 use std::io::prelude::*;
 use std::str::FromStr;
 
-use instrumented_macro::instrumented;
+use electrs_macros::trace;
 
-#[instrumented]
+#[trace]
 pub fn precache(chain: &ChainQuery, scripthashes: Vec<FullHash>) {
     let total = scripthashes.len();
     info!("Pre-caching stats and utxo set for {} scripthashes", total);
@@ -39,7 +39,7 @@ pub fn precache(chain: &ChainQuery, scripthashes: Vec<FullHash>) {
     });
 }
 
-#[instrumented]
+#[trace]
 pub fn scripthashes_from_file(path: String) -> Result<Vec<FullHash>> {
     let reader =
         io::BufReader::new(File::open(path).chain_err(|| "cannot open precache scripthash file")?);
diff --git a/src/new_index/query.rs b/src/new_index/query.rs
index 1bc5c91d1..df258bea9 100644
--- a/src/new_index/query.rs
+++ b/src/new_index/query.rs
@@ -11,7 +11,7 @@ use crate::errors::*;
 use crate::new_index::{ChainQuery, Mempool, ScriptStats, SpendingInput, Utxo};
 use crate::util::{is_spendable, BlockId, Bytes, TransactionStatus};
 
-use instrumented_macro::instrumented;
+use electrs_macros::trace;
 
 #[cfg(feature = "liquid")]
 use crate::{
@@ -71,7 +71,7 @@ impl Query {
         self.mempool.read().unwrap()
     }
 
-    #[instrumented]
+    #[trace]
     pub fn broadcast_raw(&self, txhex: &str) -> Result<Txid> {
         let txid = self.daemon.broadcast_raw(txhex)?;
         let _ = self
@@ -82,7 +82,7 @@ impl Query {
         Ok(txid)
     }
 
-    #[instrumented]
+    #[trace]
     pub fn utxo(&self, scripthash: &[u8]) -> Result<Vec<Utxo>> {
         let mut utxos = self.chain.utxo(scripthash, self.config.utxos_limit)?;
         let mempool = self.mempool();
@@ -91,7 +91,7 @@ impl Query {
         Ok(utxos)
     }
 
-    #[instrumented]
+    #[trace]
     pub fn history_txids(&self, scripthash: &[u8], limit: usize) -> Vec<(Txid, Option<BlockId>)> {
         let confirmed_txids = self.chain.history_txids(scripthash, limit);
         let confirmed_len = confirmed_txids.len();
@@ -113,21 +113,21 @@ impl Query {
         )
     }
 
-    #[instrumented]
+    #[trace]
     pub fn lookup_txn(&self, txid: &Txid) -> Option<Transaction> {
         self.chain
             .lookup_txn(txid, None)
             .or_else(|| self.mempool().lookup_txn(txid))
     }
 
-    #[instrumented]
+    #[trace]
     pub fn lookup_raw_txn(&self, txid: &Txid) -> Option<Bytes> {
         self.chain
             .lookup_raw_txn(txid, None)
             .or_else(|| self.mempool().lookup_raw_txn(txid))
     }
 
-    #[instrumented]
+    #[trace]
     pub fn lookup_txos(&self, outpoints: BTreeSet<OutPoint>) -> HashMap<OutPoint, TxOut> {
         // the mempool lookup_txos() internally looks up confirmed txos as well
         self.mempool()
            .lookup_txos(outpoints)
            .expect("failed loading txos")
     }
 
-    #[instrumented]
+    #[trace]
     pub fn lookup_spend(&self, outpoint: &OutPoint) -> Option<SpendingInput> {
         self.chain
             .lookup_spend(outpoint)
             .or_else(|| self.mempool().lookup_spend(outpoint))
     }
 
-    #[instrumented]
+    #[trace]
     pub fn lookup_tx_spends(&self, tx: Transaction) -> Vec<Option<SpendingInput>> {
         let txid = tx.compute_txid();
 
@@ -162,22 +162,22 @@ impl Query {
             .collect()
     }
 
-    #[instrumented]
+    #[trace]
     pub fn get_tx_status(&self, txid: &Txid) -> TransactionStatus {
         TransactionStatus::from(self.chain.tx_confirming_block(txid))
     }
 
-    #[instrumented]
+    #[trace]
     pub fn get_mempool_tx_fee(&self, txid: &Txid) -> Option<u64> {
         self.mempool().get_tx_fee(txid)
     }
 
-    #[instrumented]
+    #[trace]
     pub fn has_unconfirmed_parents(&self, txid: &Txid) -> bool {
         self.mempool().has_unconfirmed_parents(txid)
     }
 
-    #[instrumented]
+    #[trace]
     pub fn estimate_fee(&self, conf_target: u16) -> Option<f64> {
         if self.config.network_type.is_regtest() {
             return self.get_relayfee().ok();
@@ -197,7 +197,7 @@ impl Query {
             .copied()
     }
 
-    #[instrumented]
+    #[trace]
     pub fn estimate_fee_map(&self) -> HashMap<u16, f64> {
         if let (ref cache, Some(cache_time)) = *self.cached_estimates.read().unwrap() {
             if cache_time.elapsed() < Duration::from_secs(FEE_ESTIMATES_TTL) {
@@ -209,7 +209,7 @@ impl Query {
         self.cached_estimates.read().unwrap().0.clone()
     }
 
-    #[instrumented]
+    #[trace]
     fn update_fee_estimates(&self) {
         match self.daemon.estimatesmartfee_batch(&CONF_TARGETS) {
             Ok(estimates) => {
@@ -221,7 +221,7 @@ impl Query {
         }
     }
 
-    #[instrumented]
+    #[trace]
     pub fn get_relayfee(&self) -> Result<f64> {
         if let Some(cached) = *self.cached_relayfee.read().unwrap() {
             return Ok(cached);
@@ -252,13 +252,13 @@ impl Query {
     }
 
     #[cfg(feature = "liquid")]
-    #[instrumented]
+    #[trace]
     pub fn lookup_asset(&self, asset_id: &AssetId) -> Result<Option<LiquidAsset>> {
         lookup_asset(&self, self.asset_db.as_ref(), asset_id, None)
     }
 
     #[cfg(feature = "liquid")]
-    #[instrumented]
+    #[trace]
     pub fn list_registry_assets(
         &self,
         start_index: usize,
diff --git a/src/rest.rs b/src/rest.rs
index 58b58a606..e27b65a71 100644
--- a/src/rest.rs
+++ b/src/rest.rs
@@ -24,7 +24,7 @@ use tokio::sync::oneshot;
 use std::fs;
 use std::str::FromStr;
 
-use instrumented_macro::instrumented;
+use electrs_macros::trace;
 
 #[cfg(feature = "liquid")]
 use {
@@ -593,7 +593,7 @@ impl Handle {
     }
 }
 
-#[instrumented]
+#[trace]
 fn handle_request(
     method: Method,
     uri: hyper::Uri,
@@ -1167,7 +1167,7 @@ fn json_response<T: Serialize>(value: T, ttl: u32) -> Result<Response<Body>, Htt
         .unwrap())
 }
 
-#[instrumented]
+#[trace]
 fn blocks(query: &Query, start_height: Option<usize>) -> Result<Response<Body>, HttpError> {
     let mut values = Vec::new();
     let mut current_hash = match start_height {
diff --git a/src/util/block.rs b/src/util/block.rs
index a8a6aee03..0c0804aa5 100644
--- a/src/util/block.rs
+++ b/src/util/block.rs
@@ -9,7 +9,7 @@ use std::slice;
 use time::format_description::well_known::Rfc3339;
 use time::OffsetDateTime as DateTime;
 
-use instrumented_macro::instrumented;
+use electrs_macros::trace;
 
 const MTP_SPAN: usize = 11;
 
@@ -94,7 +94,7 @@ impl HeaderList {
         }
     }
 
-    #[instrumented]
+    #[trace]
     pub fn new(
         mut headers_map: HashMap<BlockHash, BlockHeader>,
         tip_hash: BlockHash,
@@ -132,7 +132,7 @@ impl HeaderList {
         headers
     }
 
-    #[instrumented]
+    #[trace]
    pub fn order(&self, new_headers: Vec<BlockHeader>) -> Vec<HeaderEntry> {
         // header[i] -> header[i-1] (i.e. header.last() is the tip)
         struct HashedHeader {
@@ -172,7 +172,7 @@ impl HeaderList {
             .collect()
     }
 
-    #[instrumented]
+    #[trace]
     pub fn apply(&mut self, new_headers: Vec<HeaderEntry>) {
         // new_headers[i] -> new_headers[i - 1] (i.e. new_headers.last() is the tip)
         for i in 1..new_headers.len() {
@@ -210,7 +210,7 @@ impl HeaderList {
         }
     }
 
-    #[instrumented]
+    #[trace]
     pub fn header_by_blockhash(&self, blockhash: &BlockHash) -> Option<&HeaderEntry> {
         let height = self.heights.get(blockhash)?;
         let header = self.headers.get(*height)?;
@@ -221,7 +221,7 @@ impl HeaderList {
         }
     }
 
-    #[instrumented]
+    #[trace]
     pub fn header_by_height(&self, height: usize) -> Option<&HeaderEntry> {
         self.headers.get(height).map(|entry| {
             assert_eq!(entry.height(), height);
diff --git a/src/util/electrum_merkle.rs b/src/util/electrum_merkle.rs
index 8b146705e..52e0a825a 100644
--- a/src/util/electrum_merkle.rs
+++ b/src/util/electrum_merkle.rs
@@ -3,9 +3,9 @@ use crate::errors::*;
 use crate::new_index::ChainQuery;
 use bitcoin::hashes::{sha256d::Hash as Sha256dHash, Hash};
 
-use instrumented_macro::instrumented;
+use electrs_macros::trace;
 
-#[instrumented]
+#[trace]
 pub fn get_tx_merkle_proof(
     chain: &ChainQuery,
     tx_hash: &Txid,
@@ -24,7 +24,7 @@ pub fn get_tx_merkle_proof(
     Ok((branch, pos))
 }
 
-#[instrumented]
+#[trace]
 pub fn get_header_merkle_proof(
     chain: &ChainQuery,
     height: usize,
@@ -53,7 +53,7 @@ pub fn get_header_merkle_proof(
     let header_hashes = header_hashes.into_iter().map(Sha256dHash::from).collect();
     Ok(create_merkle_branch_and_root(header_hashes, height))
 }
-#[instrumented]
+#[trace]
 pub fn get_id_from_pos(
     chain: &ChainQuery,
     height: usize,
diff --git a/src/util/fees.rs b/src/util/fees.rs
index eb3d61b15..0b7f7ada7 100644
--- a/src/util/fees.rs
+++ b/src/util/fees.rs
@@ -1,7 +1,7 @@
 use crate::chain::{Network, Transaction, TxOut};
 use std::collections::HashMap;
 
-use instrumented_macro::instrumented;
+use electrs_macros::trace;
 
 const VSIZE_BIN_WIDTH: u64 = 50_000; // in vbytes
 
@@ -48,7 +48,7 @@ pub fn get_tx_fee(tx: &Transaction, _prevouts: &HashMap<u32, TxOut>, network: N
     tx.fee_in(*network.native_asset())
 }
 
-#[instrumented]
+#[trace]
 pub fn make_fee_histogram(mut entries: Vec<&TxFeeInfo>) -> Vec<(f64, u64)> {
     entries.sort_unstable_by(|e1, e2| e1.fee_per_vbyte.partial_cmp(&e2.fee_per_vbyte).unwrap());
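
Usage sketch (not part of the patch): how a call site consumes the renamed
attribute. The function below is hypothetical; the patch's own call sites
(e.g. handle_request) use the same pattern. Judging from the macro body,
tokens passed to #[trace(...)] are appended to the generated span's field
list, which is why call sites can write tracing-style fields such as
`method = %method`; the exact expansion is elided from this diff.

    use electrs_macros::trace;

    // Hypothetical example function, not taken from the codebase.
    #[trace(method = %method)]
    fn dispatch(method: &str) {
        // Built with `--features otlp-tracing`, this body runs inside a
        // span that records `method`; built without it, #[trace] is the
        // identity macro and the function compiles unchanged.
        println!("handling {method}");
    }

Because the old no-otlp-tracing/otlp-tracing feature pair (and its
compile_error! guards in src/lib.rs) is gone, default builds need no tracing
feature at all, and instrumented builds enable exactly one:

    cargo build --features otlp-tracing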