log: add an interest cache for logs emitted through the log crate (#1636)

## Motivation

We use `tracing` as our logger in
[`substrate`](https://github.com/paritytech/substrate). We've noticed
that as soon as *any* `trace` log is enabled (even one which doesn't
exist) the whole logging machinery starts to take a lot of time, even
if nothing at all is actually printed!

In one of our quick-and-dirty reproduction benchmarks (JIT-ing a WASM
program) we saw the total real runtime rise from around ~1.3s to ~7s
just by adding a `trace` log filter which doesn't match anything.
(Depending on the hardware and on how many threads are simultaneously
logging this figure varies pretty widely, but it's always a very
significant slowdown.)

After looking into this problem I've found that the culprits of the
slowdown were `trace!` and `debug!` logs sprinkled quite liberally in
some of the hotter codepaths. When there are no `trace`-level filters
defined on the logger it can basically reject those inert `trace!` and
`debug!` logs purely based on the current maximum logging level (which
is cheap!), but as soon as you define *any* trace filter the current
maximum logging level changes, and then every `trace!` and `debug!` log
has to go through the whole filtering machinery before it can be rejected.
While this is cheap if you only do it once, it starts to become very
expensive when you do it a lot, especially when you're running multiple
threads and enable log reloading. (This is related to
https://github.com/tokio-rs/tracing/issues/1632.)
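
To make the cheap check concrete, here's a rough sketch (an approximation, not the actual macro expansion) of the two-tier test that every `log` macro invocation performs, using only the `log` crate's public API:

```rust
use log::{max_level, Level, MetadataBuilder};

// Roughly what a `trace!` invocation boils down to.
fn would_log_trace(target: &str) -> bool {
    // Fast path: a single atomic load plus a comparison. With no `trace`
    // filter configured anywhere, `max_level()` is e.g. `LevelFilter::Info`
    // and we bail out right here.
    if Level::Trace > max_level() {
        return false;
    }
    // Slow path: once *any* `trace` directive exists, `max_level()` becomes
    // `Trace` and every call falls through to the logger's full `enabled()`
    // machinery, which matches the target against all filter directives.
    let metadata = MetadataBuilder::new()
        .level(Level::Trace)
        .target(target)
        .build();
    log::logger().enabled(&metadata)
}
```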

## Solution

I've added an opt-in per-thread LRU cache which caches whether the
logger is actually interested in a given `target` + `level` pair for
every log emitted through the `log` crate.
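
Enabling it is a one-liner on the `LogTracer` builder; this mirrors the setup used in the benchmark below (the new `interest-cache` cargo feature must be enabled):

```rust
use tracing_log::{InterestCacheConfig, LogTracer};

// Install the `log` -> `tracing` bridge with the interest cache enabled;
// without `.with_interest_cache(...)` the cache stays disabled.
LogTracer::builder()
    .with_max_level(log::LevelFilter::Trace)
    .with_interest_cache(InterestCacheConfig::default())
    .init()
    .unwrap();
```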

I've also added a benchmark very roughly replicating the situation from
our code; here's the performance *without* the cache: (`cargo bench`)

```
[838.67 ns 846.51 ns 854.04 ns]
```

And here's the performance *with* the cache: (`cargo bench --features
interest-cache`)

```
[25.322 ns 25.556 ns 25.820 ns]
```

As you can see, the per-call cost was cut down to roughly 3% of what it
was before (~846 ns down to ~25.5 ns per call), a more than 30x improvement.

5 changed files with 705 additions and 3 deletions:

**`tracing-log/Cargo.toml`**

```toml
@@ -21,15 +21,20 @@ default = ["log-tracer", "trace-logger", "std"]
std = ["log/std"]
log-tracer = []
trace-logger = []
interest-cache = ["lru", "ahash"]

[dependencies]
tracing-core = { path = "../tracing-core", version = "0.1.17"}
log = { version = "0.4" }
lazy_static = "1.3.0"
env_logger = { version = "0.7", optional = true }
lru = { version = "0.7.0", optional = true }
ahash = { version = "0.7.4", optional = true }

[dev-dependencies]
tracing = { path = "../tracing", version = "0.1"}
tracing-subscriber = { path = "../tracing-subscriber" }
criterion = { version = "0.3", default_features = false }

[badges]
maintenance = { status = "actively-maintained" }
@@ -37,3 +42,7 @@ maintenance = { status = "actively-maintained" }

[package.metadata.docs.rs]
all-features = true
rustdoc-args = ["--cfg", "docsrs"]

[[bench]]
name = "logging"
harness = false
```
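
For downstream users the cache is strictly opt-in. A dependent crate would enable the feature roughly like this (the version number here is illustrative):

```toml
[dependencies]
tracing-log = { version = "0.1", features = ["interest-cache"] }
```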

**`tracing-log/benches/logging.rs`**

```rust
@@ -0,0 +1,91 @@
use criterion::{criterion_group, criterion_main, Criterion};
use log::trace;
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
use std::sync::Arc;
use std::time::{Duration, Instant};
use tracing_subscriber::{EnvFilter, FmtSubscriber};
// This creates a bunch of threads and makes sure they start executing
// a given callback almost exactly at the same time.
fn run_on_many_threads<F, R>(thread_count: usize, callback: F) -> Vec<R>
where
F: Fn() -> R + 'static + Send + Clone,
R: Send + 'static,
{
let started_count = Arc::new(AtomicUsize::new(0));
let barrier = Arc::new(AtomicBool::new(false));
#[allow(clippy::needless_collect)]
let threads: Vec<_> = (0..thread_count)
.map(|_| {
let started_count = started_count.clone();
let barrier = barrier.clone();
let callback = callback.clone();
std::thread::spawn(move || {
started_count.fetch_add(1, Ordering::SeqCst);
while !barrier.load(Ordering::SeqCst) {
std::thread::yield_now();
}
callback()
})
})
.collect();
while started_count.load(Ordering::SeqCst) != thread_count {
std::thread::yield_now();
}
barrier.store(true, Ordering::SeqCst);
threads
.into_iter()
.map(|handle| handle.join())
.collect::<Result<Vec<R>, _>>()
.unwrap()
}
fn bench_logger(c: &mut Criterion) {
let env_filter = EnvFilter::default()
.add_directive("info".parse().unwrap())
.add_directive("ws=off".parse().unwrap())
.add_directive("yamux=off".parse().unwrap())
.add_directive("regalloc=off".parse().unwrap())
.add_directive("cranelift_codegen=off".parse().unwrap())
.add_directive("cranelift_wasm=warn".parse().unwrap())
.add_directive("hyper=warn".parse().unwrap())
.add_directive("dummy=trace".parse().unwrap());
let builder = tracing_log::LogTracer::builder().with_max_level(log::LevelFilter::Trace);
#[cfg(feature = "interest-cache")]
let builder = builder.with_interest_cache(tracing_log::InterestCacheConfig::default());
builder.init().unwrap();
let builder = FmtSubscriber::builder()
.with_env_filter(env_filter)
.with_filter_reloading();
let subscriber = builder.finish();
tracing::subscriber::set_global_default(subscriber).unwrap();
const THREAD_COUNT: usize = 8;
c.bench_function("log_from_multiple_threads", |b| {
b.iter_custom(|count| {
let durations = run_on_many_threads(THREAD_COUNT, move || {
let start = Instant::now();
for _ in 0..count {
trace!("A dummy log!");
}
start.elapsed()
});
let total_time: Duration = durations.into_iter().sum();
Duration::from_nanos((total_time.as_nanos() / THREAD_COUNT as u128) as u64)
})
});
}
criterion_group!(benches, bench_logger);
criterion_main!(benches);
```
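
As an aside, the `run_on_many_threads` helper above is independent of the benchmark; a hypothetical standalone use (names as defined in the file above) would look like:

```rust
// Spawn four threads which start at (approximately) the same instant;
// `run_on_many_threads` collects each thread's return value.
let results = run_on_many_threads(4, || 2 + 2);
assert_eq!(results, vec![4, 4, 4, 4]);
```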

**`tracing-log/src/interest_cache.rs`**

```rust
@@ -0,0 +1,542 @@
use ahash::AHasher;
use log::{Level, Metadata};
use lru::LruCache;
use std::cell::RefCell;
use std::hash::Hasher;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Mutex;
/// The interest cache configuration.
#[derive(Debug)]
pub struct InterestCacheConfig {
min_verbosity: Level,
lru_cache_size: usize,
}
impl Default for InterestCacheConfig {
fn default() -> Self {
InterestCacheConfig {
min_verbosity: Level::Debug,
lru_cache_size: 1024,
}
}
}
impl InterestCacheConfig {
fn disabled() -> Self {
Self {
lru_cache_size: 0,
..Self::default()
}
}
}
impl InterestCacheConfig {
/// Sets the minimum logging verbosity for which the cache will apply.
///
/// The interest for logs with a lower verbosity than specified here
/// will not be cached.
///
/// It should be set to the lowest verbosity level for which the majority
/// of the logs in your application are usually *disabled*.
///
/// In normal circumstances with typical logger usage patterns
/// you shouldn't ever have to change this.
///
/// By default this is set to `Debug`.
pub fn with_min_verbosity(mut self, level: Level) -> Self {
self.min_verbosity = level;
self
}
/// Sets the number of entries in the LRU cache used to cache interests
/// for `log` records.
///
/// The bigger the cache, the less likely it is that the interest
/// in a given callsite will have to be recalculated, at the expense of
/// extra memory usage for every thread which tries to log events.
///
/// Every unique [level] + [target] pair consumes a single slot
/// in the cache. Entries will be added to the cache until its size
/// reaches the value configured here, and from then on it will evict
/// the least recently seen level + target pair when adding a new entry.
///
/// The ideal value to set here depends heavily on how much you're
/// logging and on how diverse the targets you log to are.
///
/// If your application spends a significant amount of time filtering logs
/// which are *not* getting printed out then increasing this value will most
/// likely help.
///
/// Setting this to zero will disable the cache.
///
/// By default this is set to 1024.
///
/// [level]: log::Metadata::level
/// [target]: log::Metadata::target
pub fn with_lru_cache_size(mut self, size: usize) -> Self {
self.lru_cache_size = size;
self
}
}
#[derive(Copy, Clone, PartialEq, Eq, Hash)]
struct Key {
target_address: usize,
level_and_length: usize,
}
struct State {
min_verbosity: Level,
epoch: usize,
cache: LruCache<Key, u64, ahash::RandomState>,
}
impl State {
fn new(epoch: usize, config: &InterestCacheConfig) -> Self {
State {
epoch,
min_verbosity: config.min_verbosity,
cache: LruCache::new(config.lru_cache_size),
}
}
}
// When the logger's filters are reconfigured the interest cache in core is cleared,
// and we also want to get notified when that happens so that we can clear our cache too.
//
// So what we do here is register a dummy callsite with the core, just so that we can be
// notified when that happens. It doesn't really matter what exactly our dummy callsite
// looks like nor whether subscribers will actually be interested in it, since nothing
// will actually be logged through it.
static INTEREST_CACHE_EPOCH: AtomicUsize = AtomicUsize::new(0);
fn interest_cache_epoch() -> usize {
INTEREST_CACHE_EPOCH.load(Ordering::Relaxed)
}
struct SentinelCallsite;
impl tracing_core::Callsite for SentinelCallsite {
fn set_interest(&self, _: tracing_core::subscriber::Interest) {
INTEREST_CACHE_EPOCH.fetch_add(1, Ordering::SeqCst);
}
fn metadata(&self) -> &tracing_core::Metadata<'_> {
&SENTINEL_METADATA
}
}
static SENTINEL_CALLSITE: SentinelCallsite = SentinelCallsite;
static SENTINEL_METADATA: tracing_core::Metadata<'static> = tracing_core::Metadata::new(
"log interest cache",
"log",
tracing_core::Level::ERROR,
None,
None,
None,
tracing_core::field::FieldSet::new(&[], tracing_core::identify_callsite!(&SENTINEL_CALLSITE)),
tracing_core::metadata::Kind::EVENT,
);
lazy_static::lazy_static! {
static ref CONFIG: Mutex<InterestCacheConfig> = {
tracing_core::callsite::register(&SENTINEL_CALLSITE);
Mutex::new(InterestCacheConfig::disabled())
};
}
thread_local! {
static STATE: RefCell<State> = {
let config = CONFIG.lock().unwrap();
RefCell::new(State::new(interest_cache_epoch(), &config))
};
}
pub(crate) fn configure(new_config: Option<InterestCacheConfig>) {
*CONFIG.lock().unwrap() = new_config.unwrap_or_else(InterestCacheConfig::disabled);
INTEREST_CACHE_EPOCH.fetch_add(1, Ordering::SeqCst);
}
pub(crate) fn try_cache(metadata: &Metadata<'_>, callback: impl FnOnce() -> bool) -> bool {
STATE.with(|state| {
let mut state = state.borrow_mut();
// If the interest cache in core was rebuilt we need to reset the cache here too.
let epoch = interest_cache_epoch();
if epoch != state.epoch {
*state = State::new(epoch, &CONFIG.lock().unwrap());
}
let level = metadata.level();
if state.cache.cap() == 0 || level < state.min_verbosity {
return callback();
}
let target = metadata.target();
let mut hasher = AHasher::default();
hasher.write(target.as_bytes());
const HASH_MASK: u64 = !1;
const INTEREST_MASK: u64 = 1;
// We mask out the least significant bit of the hash since we'll use
// that space to save the interest.
//
// Since we use a good hashing function the loss of only a single bit
// won't really affect us negatively.
let target_hash = hasher.finish() & HASH_MASK;
// Since log targets are usually static strings we just use the address of the pointer
// as the key for our cache.
//
// We want each level to be cached separately so we also use the level as key, and since
// some linkers at certain optimization levels deduplicate strings if their prefix matches
// (e.g. "ham" and "hamster" might actually have the same address in memory) we also use the length.
let key = Key {
target_address: target.as_ptr() as usize,
// For extra efficiency we pack both the level and the length into a single field.
// The `level` can be between 1 and 5, so it can take at most 3 bits of space.
level_and_length: level as usize | target.len().wrapping_shl(3),
};
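// Editorial aside (not part of the original commit): as a worked example,
// for the target "dummy" (length 5) at `Level::Trace` (numeric value 5),
// `level_and_length` comes out to `5 | (5 << 3)`, i.e. 45.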
if let Some(&cached) = state.cache.get(&key) {
// And here we make sure that the target actually matches.
//
// This is just a hash of the target string, so theoretically we're not guaranteed
// that it won't collide, however in practice it shouldn't matter as it is quite
// unlikely that the target string's address and its length and the level and
// the hash will *all* be equal at the same time.
//
// We could of course actually store the whole target string in our cache,
// but we really want to avoid doing that as the necessary memory allocations
// would completely tank our performance, especially in cases where the cache's
// size is too small so it needs to regularly replace entries.
if cached & HASH_MASK == target_hash {
return (cached & INTEREST_MASK) != 0;
}
// Realistically we should never land here, unless someone is using a non-static
// target string with the same length and level, or is very lucky and found a hash
// collision for the cache's key.
}
let interest = callback();
state.cache.put(key, target_hash | interest as u64);
interest
})
}
#[cfg(test)]
mod tests {
use super::*;
fn lock_for_test() -> impl Drop {
// We need to make sure only one test runs at a time.
lazy_static::lazy_static! {
static ref LOCK: Mutex<()> = Mutex::new(());
}
match LOCK.lock() {
Ok(guard) => guard,
Err(poison) => poison.into_inner(),
}
}
#[test]
fn test_when_disabled_the_callback_is_always_called() {
let _lock = lock_for_test();
*CONFIG.lock().unwrap() = InterestCacheConfig::disabled();
std::thread::spawn(|| {
let metadata = log::MetadataBuilder::new()
.level(Level::Trace)
.target("dummy")
.build();
let mut count = 0;
try_cache(&metadata, || {
count += 1;
true
});
assert_eq!(count, 1);
try_cache(&metadata, || {
count += 1;
true
});
assert_eq!(count, 2);
})
.join()
.unwrap();
}
#[test]
fn test_when_enabled_the_callback_is_called_only_once_for_a_high_enough_verbosity() {
let _lock = lock_for_test();
*CONFIG.lock().unwrap() = InterestCacheConfig::default().with_min_verbosity(Level::Debug);
std::thread::spawn(|| {
let metadata = log::MetadataBuilder::new()
.level(Level::Debug)
.target("dummy")
.build();
let mut count = 0;
try_cache(&metadata, || {
count += 1;
true
});
assert_eq!(count, 1);
try_cache(&metadata, || {
count += 1;
true
});
assert_eq!(count, 1);
})
.join()
.unwrap();
}
#[test]
fn test_when_core_interest_cache_is_rebuilt_this_cache_is_also_flushed() {
let _lock = lock_for_test();
*CONFIG.lock().unwrap() = InterestCacheConfig::default().with_min_verbosity(Level::Debug);
std::thread::spawn(|| {
let metadata = log::MetadataBuilder::new()
.level(Level::Debug)
.target("dummy")
.build();
{
let mut count = 0;
try_cache(&metadata, || {
count += 1;
true
});
try_cache(&metadata, || {
count += 1;
true
});
assert_eq!(count, 1);
}
tracing_core::callsite::rebuild_interest_cache();
{
let mut count = 0;
try_cache(&metadata, || {
count += 1;
true
});
try_cache(&metadata, || {
count += 1;
true
});
assert_eq!(count, 1);
}
})
.join()
.unwrap();
}
#[test]
fn test_when_enabled_the_callback_is_always_called_for_a_low_enough_verbosity() {
let _lock = lock_for_test();
*CONFIG.lock().unwrap() = InterestCacheConfig::default().with_min_verbosity(Level::Debug);
std::thread::spawn(|| {
let metadata = log::MetadataBuilder::new()
.level(Level::Info)
.target("dummy")
.build();
let mut count = 0;
try_cache(&metadata, || {
count += 1;
true
});
assert_eq!(count, 1);
try_cache(&metadata, || {
count += 1;
true
});
assert_eq!(count, 2);
})
.join()
.unwrap();
}
#[test]
fn test_different_log_levels_are_cached_separately() {
let _lock = lock_for_test();
*CONFIG.lock().unwrap() = InterestCacheConfig::default().with_min_verbosity(Level::Debug);
std::thread::spawn(|| {
let metadata_debug = log::MetadataBuilder::new()
.level(Level::Debug)
.target("dummy")
.build();
let metadata_trace = log::MetadataBuilder::new()
.level(Level::Trace)
.target("dummy")
.build();
let mut count_debug = 0;
let mut count_trace = 0;
try_cache(&metadata_debug, || {
count_debug += 1;
true
});
try_cache(&metadata_trace, || {
count_trace += 1;
true
});
try_cache(&metadata_debug, || {
count_debug += 1;
true
});
try_cache(&metadata_trace, || {
count_trace += 1;
true
});
assert_eq!(count_debug, 1);
assert_eq!(count_trace, 1);
})
.join()
.unwrap();
}
#[test]
fn test_different_log_targets_are_cached_separately() {
let _lock = lock_for_test();
*CONFIG.lock().unwrap() = InterestCacheConfig::default().with_min_verbosity(Level::Debug);
std::thread::spawn(|| {
let metadata_1 = log::MetadataBuilder::new()
.level(Level::Trace)
.target("dummy_1")
.build();
let metadata_2 = log::MetadataBuilder::new()
.level(Level::Trace)
.target("dummy_2")
.build();
let mut count_1 = 0;
let mut count_2 = 0;
try_cache(&metadata_1, || {
count_1 += 1;
true
});
try_cache(&metadata_2, || {
count_2 += 1;
true
});
try_cache(&metadata_1, || {
count_1 += 1;
true
});
try_cache(&metadata_2, || {
count_2 += 1;
true
});
assert_eq!(count_1, 1);
assert_eq!(count_2, 1);
})
.join()
.unwrap();
}
#[test]
fn test_when_cache_runs_out_of_space_the_callback_is_called_again() {
let _lock = lock_for_test();
*CONFIG.lock().unwrap() = InterestCacheConfig::default()
.with_min_verbosity(Level::Debug)
.with_lru_cache_size(1);
std::thread::spawn(|| {
let metadata_1 = log::MetadataBuilder::new()
.level(Level::Trace)
.target("dummy_1")
.build();
let metadata_2 = log::MetadataBuilder::new()
.level(Level::Trace)
.target("dummy_2")
.build();
let mut count = 0;
try_cache(&metadata_1, || {
count += 1;
true
});
try_cache(&metadata_1, || {
count += 1;
true
});
assert_eq!(count, 1);
try_cache(&metadata_2, || true);
try_cache(&metadata_1, || {
count += 1;
true
});
assert_eq!(count, 2);
})
.join()
.unwrap();
}
#[test]
fn test_cache_returns_previously_computed_value() {
let _lock = lock_for_test();
*CONFIG.lock().unwrap() = InterestCacheConfig::default().with_min_verbosity(Level::Debug);
std::thread::spawn(|| {
let metadata_1 = log::MetadataBuilder::new()
.level(Level::Trace)
.target("dummy_1")
.build();
let metadata_2 = log::MetadataBuilder::new()
.level(Level::Trace)
.target("dummy_2")
.build();
try_cache(&metadata_1, || true);
assert_eq!(try_cache(&metadata_1, || { unreachable!() }), true);
try_cache(&metadata_2, || false);
assert_eq!(try_cache(&metadata_2, || { unreachable!() }), false);
})
.join()
.unwrap();
}
#[test]
fn test_cache_handles_non_static_target_string() {
let _lock = lock_for_test();
*CONFIG.lock().unwrap() = InterestCacheConfig::default().with_min_verbosity(Level::Debug);
std::thread::spawn(|| {
let mut target = *b"dummy_1";
let metadata_1 = log::MetadataBuilder::new()
.level(Level::Trace)
.target(std::str::from_utf8(&target).unwrap())
.build();
try_cache(&metadata_1, || true);
assert_eq!(try_cache(&metadata_1, || { unreachable!() }), true);
*target.last_mut().unwrap() = b'2';
let metadata_2 = log::MetadataBuilder::new()
.level(Level::Trace)
.target(std::str::from_utf8(&target).unwrap())
.build();
try_cache(&metadata_2, || false);
assert_eq!(try_cache(&metadata_2, || { unreachable!() }), false);
})
.join()
.unwrap();
}
}
```
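
The bit-packing used in `try_cache` above can be distilled into a standalone sketch (hypothetical helper names, not part of the commit):

```rust
const HASH_MASK: u64 = !1;
const INTEREST_MASK: u64 = 1;

// The cached interest lives in the least significant bit of the entry;
// the remaining 63 bits identify the target via its (masked) hash.
fn pack(target_hash: u64, interest: bool) -> u64 {
    (target_hash & HASH_MASK) | interest as u64
}

fn unpack(cached: u64) -> (u64, bool) {
    (cached & HASH_MASK, (cached & INTEREST_MASK) != 0)
}
```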

**`tracing-log/src/lib.rs`**

```rust
@@ -74,6 +74,8 @@
//! * `log-tracer`: enables the `LogTracer` type (on by default)
//! * `env_logger`: enables the `env_logger` module, with helpers for working
//! with the [`env_logger` crate].
//! * `interest-cache`: makes it possible to configure an interest cache for
//! logs emitted through the `log` crate (see [`Builder::with_interest_cache`]); requires `std`
//!
//! ## Supported Rust Versions
//!
@@ -105,6 +107,7 @@
//! [`Subscriber`]: https://docs.rs/tracing/latest/tracing/trait.Subscriber.html
//! [`tracing::Event`]: https://docs.rs/tracing/latest/tracing/struct.Event.html
//! [flags]: https://docs.rs/tracing/latest/tracing/#crate-feature-flags
//! [`Builder::with_interest_cache`]: log_tracer::Builder::with_interest_cache
#![doc(html_root_url = "https://docs.rs/tracing-log/0.1.2")]
#![doc(
html_logo_url = "https://raw.githubusercontent.com/tokio-rs/tracing/master/assets/logo-type.png",
@@ -175,6 +178,16 @@ pub mod env_logger;
pub use log;
#[cfg(all(feature = "interest-cache", feature = "log-tracer", feature = "std"))]
mod interest_cache;
#[cfg(all(feature = "interest-cache", feature = "log-tracer", feature = "std"))]
#[cfg_attr(
docsrs,
doc(cfg(all(feature = "interest-cache", feature = "log-tracer", feature = "std")))
)]
pub use crate::interest_cache::InterestCacheConfig;
/// Format a log record as a trace event in the current span.
pub fn format_trace(record: &log::Record<'_>) -> io::Result<()> {
dispatch_record(record);
```

**`tracing-log/src/log_tracer.rs`**

```rust
@@ -39,6 +39,8 @@ pub struct LogTracer {
pub struct Builder {
ignore_crates: Vec<String>,
filter: log::LevelFilter,
#[cfg(all(feature = "interest-cache", feature = "std"))]
interest_cache_config: Option<crate::InterestCacheConfig>,
}
// ===== impl LogTracer =====
@@ -156,6 +158,14 @@ impl Default for LogTracer {
}
}
#[cfg(all(feature = "interest-cache", feature = "std"))]
use crate::interest_cache::try_cache as try_cache_interest;
#[cfg(not(all(feature = "interest-cache", feature = "std")))]
fn try_cache_interest(_: &log::Metadata<'_>, callback: impl FnOnce() -> bool) -> bool {
callback()
}
impl log::Log for LogTracer {
fn enabled(&self, metadata: &log::Metadata<'_>) -> bool {
// First, check the log record against the current max level enabled by
@@ -178,8 +188,10 @@
}
}
try_cache_interest(metadata, || {
// Finally, check if the current `tracing` dispatcher cares about this.
dispatcher::get_default(|dispatch| dispatch.enabled(&metadata.as_trace()))
})
}
fn log(&self, record: &log::Record<'_>) {
@@ -236,13 +248,46 @@ impl Builder {
crates.into_iter().fold(self, Self::ignore_crate)
}
/// Configures the `LogTracer` to either disable or enable the interest cache.
///
/// When enabled, a per-thread LRU cache will be used to cache whether the logger
/// is interested in a given [level] + [target] pair for records generated through
/// the `log` crate.
///
/// When no `trace!` logs are enabled the logger is able to cheaply filter
/// them out just by comparing their log level to the globally specified
/// maximum, and immediately reject them. When *any* `trace!` log is
/// enabled (even one which doesn't actually exist!) the logger has to run
/// its full filtering machinery on each and every `trace!` log, which can
/// potentially be very expensive.
///
/// Enabling this cache is useful in such situations to improve performance.
///
/// You most likely do not want to enable this if you have registered any dynamic
/// filters on your logger and you want them to be run every time.
///
/// This is disabled by default.
///
/// [level]: log::Metadata::level
/// [target]: log::Metadata::target
#[cfg(all(feature = "interest-cache", feature = "std"))]
#[cfg_attr(docsrs, doc(cfg(all(feature = "interest-cache", feature = "std"))))]
pub fn with_interest_cache(mut self, config: crate::InterestCacheConfig) -> Self {
self.interest_cache_config = Some(config);
self
}
/// Constructs a new `LogTracer` with the provided configuration and sets it
/// as the default logger.
///
/// Setting a global logger can only be done once.
#[cfg(feature = "std")]
#[cfg_attr(docsrs, doc(cfg(feature = "std")))]
#[allow(unused_mut)]
pub fn init(mut self) -> Result<(), SetLoggerError> {
#[cfg(all(feature = "interest-cache", feature = "std"))]
crate::interest_cache::configure(self.interest_cache_config.take());
let ignore_crates = self.ignore_crates.into_boxed_slice();
let logger = Box::new(LogTracer { ignore_crates });
log::set_boxed_logger(logger)?;
@@ -256,6 +301,8 @@ impl Default for Builder {
Self {
ignore_crates: Vec::new(),
filter: log::LevelFilter::max(),
#[cfg(all(feature = "interest-cache", feature = "std"))]
interest_cache_config: None,
}
}
}
```
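
Putting it all together, a minimal end-to-end setup closely following the benchmark added in this PR (the filter directives and configuration values are illustrative):

```rust
use tracing_log::{InterestCacheConfig, LogTracer};
use tracing_subscriber::{EnvFilter, FmtSubscriber};

fn main() {
    // Install the `log` -> `tracing` bridge with the interest cache enabled.
    LogTracer::builder()
        .with_max_level(log::LevelFilter::Trace)
        .with_interest_cache(
            // Both knobs are optional; these are their default values.
            InterestCacheConfig::default()
                .with_min_verbosity(log::Level::Debug)
                .with_lru_cache_size(1024),
        )
        .init()
        .unwrap();

    // A single `trace` directive like this is exactly what used to force
    // every `log::trace!` call through the full filtering machinery.
    let env_filter = EnvFilter::default()
        .add_directive("info".parse().unwrap())
        .add_directive("dummy=trace".parse().unwrap());

    let subscriber = FmtSubscriber::builder()
        .with_env_filter(env_filter)
        .finish();
    tracing::subscriber::set_global_default(subscriber).unwrap();

    // After the first lookup for this target + level pair, these calls
    // are answered straight from the per-thread cache.
    log::trace!("a dummy log!");
}
```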