internal: Collect garbage after events when quiescent

Lukas Wirth 2025-12-28 12:30:57 +01:00
parent a3bf20fa00
commit b1afb84586
12 changed files with 31 additions and 69 deletions
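In short: garbage collection no longer runs every N applied changes (the gc_frequency config and the revisions_until_next_gc counter are removed below); instead it runs when the server goes quiescent, and once after cache priming finishes. The central new operation is AnalysisHost::trigger_garbage_collection; a lightly annotated sketch of it follows, copied from the hunk further down with editorial comments added:

pub fn trigger_garbage_collection(&mut self) {
    // evict cold memoized query results (LRU eviction); per the SAFETY note
    // in the hunk below, this also cancels any in-flight queries
    self.db.trigger_lru_eviction();
    // with every query canceled, nothing can be holding on to interned types,
    // so sweeping the hir type interner is sound
    unsafe { hir::collect_ty_garbage() };
}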

View File

@@ -1,19 +1,14 @@
//! Applies changes to the IDE state transactionally.
use profile::Bytes;
use salsa::{Database as _, Durability};
use salsa::Database as _;
use crate::{ChangeWithProcMacros, RootDatabase};
impl RootDatabase {
pub fn request_cancellation(&mut self) {
let _p = tracing::info_span!("RootDatabase::request_cancellation").entered();
self.synthetic_write(Durability::LOW);
}
pub fn apply_change(&mut self, change: ChangeWithProcMacros) {
let _p = tracing::info_span!("RootDatabase::apply_change").entered();
self.request_cancellation();
self.trigger_cancellation();
tracing::trace!("apply_change {:?}", change);
change.apply(self);
}

View File

@@ -108,10 +108,9 @@ pub fn parallel_prime_caches(
hir::attach_db(&db, || {
// method resolution is likely to hit all trait impls at some point
// we pre-populate it here as this will hit a lot of parses ...
_ = hir::TraitImpls::for_crate(&db, crate_id);
// we compute the lang items here as the work for them is also highly recursive and will be triggered by the module symbols query
// This also computes the lang items, which is what we want as the work for them is also highly recursive and will be triggered by the module symbols query
// slowing down leaf crate analysis tremendously as we go back to being blocked on a single thread
_ = hir::crate_lang_items(&db, crate_id);
_ = hir::TraitImpls::for_crate(&db, crate_id);
})
});
@@ -271,7 +270,6 @@ pub fn parallel_prime_caches(
}
if crate_def_maps_done == crate_def_maps_total {
// Can we trigger lru-eviction once at this point to reduce peak memory usage?
cb(ParallelPrimeCachesProgress {
crates_currently_indexing: vec![],
crates_done: crate_def_maps_done,

View File

@@ -67,7 +67,7 @@ use ide_db::{
FxHashMap, FxIndexSet, LineIndexDatabase,
base_db::{
CrateOrigin, CrateWorkspaceData, Env, FileSet, RootQueryDb, SourceDatabase, VfsPath,
salsa::Cancelled,
salsa::{Cancelled, Database},
},
prime_caches, symbol_index,
};
@@ -199,8 +199,13 @@ impl AnalysisHost {
pub fn per_query_memory_usage(&mut self) -> Vec<(String, profile::Bytes, usize)> {
self.db.per_query_memory_usage()
}
pub fn request_cancellation(&mut self) {
self.db.request_cancellation();
pub fn trigger_cancellation(&mut self) {
self.db.trigger_cancellation();
}
pub fn trigger_garbage_collection(&mut self) {
self.db.trigger_lru_eviction();
// SAFETY: `trigger_lru_eviction` triggers cancellation, so all running queries were canceled.
unsafe { hir::collect_ty_garbage() };
}
pub fn raw_database(&self) -> &RootDatabase {
&self.db
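From an embedder's point of view the split is now: trigger_cancellation stays cheap and is implied by every apply_change, while trigger_garbage_collection is the heavyweight, occasional path. A minimal usage sketch, assuming host is an AnalysisHost constructed elsewhere and change is a ChangeWithProcMacros:

// on every edit: apply the change; RootDatabase::apply_change itself calls
// trigger_cancellation first (see the first hunk of this commit)
host.apply_change(change);

// only when idle, or after cache priming: evict LRU memos and sweep the
// hir type interner; this may block briefly, so it is not done per keystroke
host.trigger_garbage_collection();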

View File

@@ -354,11 +354,10 @@ impl flags::AnalysisStats {
self.run_term_search(&workspace, db, &vfs, &file_ids, verbosity);
}
hir::clear_tls_solver_cache();
unsafe { hir::collect_ty_garbage() };
let db = host.raw_database_mut();
db.trigger_lru_eviction();
hir::clear_tls_solver_cache();
unsafe { hir::collect_ty_garbage() };
let total_span = analysis_sw.elapsed();
eprintln!("{:<20} {total_span}", "Total:");

View File

@@ -185,7 +185,7 @@ impl Tester {
if !worker.is_finished() {
// attempt to cancel the worker, won't work for chalk hangs unfortunately
self.host.request_cancellation();
self.host.trigger_garbage_collection();
}
worker.join().and_then(identity)
});

View File

@@ -98,13 +98,6 @@ config_data! {
/// Code's `files.watcherExclude`.
files_exclude | files_excludeDirs: Vec<Utf8PathBuf> = vec![],
/// This config controls the frequency in which rust-analyzer will perform its internal Garbage
/// Collection. It is specified in revisions, roughly equivalent to number of changes. The default
/// is 1000.
///
/// Setting a smaller value may help limit peak memory usage at the expense of speed.
gc_frequency: usize = 1000,
/// If this is `true`, when "Goto Implementations" or the "Implementations" lens is triggered on a `struct`, `enum`, or `union`, we filter out trait implementations that originate from `derive`s above the type.
gotoImplementations_filterAdjacentDerives: bool = false,
@@ -1712,10 +1705,6 @@ impl Config {
&self.caps
}
pub fn gc_freq(&self) -> usize {
*self.gc_frequency()
}
pub fn assist(&self, source_root: Option<SourceRootId>) -> AssistConfig {
AssistConfig {
snippet_cap: self.snippet_cap(),

View File

@@ -193,8 +193,6 @@ pub(crate) struct GlobalState {
/// which will usually end up causing a bunch of incorrect diagnostics on startup.
pub(crate) incomplete_crate_graph: bool,
pub(crate) revisions_until_next_gc: usize,
pub(crate) minicore: MiniCoreRustAnalyzerInternalOnly,
}
@@ -321,8 +319,6 @@ impl GlobalState {
incomplete_crate_graph: false,
minicore: MiniCoreRustAnalyzerInternalOnly::default(),
revisions_until_next_gc: config.gc_freq(),
};
// Apply any required database inputs from the config.
this.update_configuration(config);
@@ -347,11 +343,11 @@ impl GlobalState {
let (change, modified_rust_files, workspace_structure_change) =
self.cancellation_pool.scoped(|s| {
// start cancellation in parallel, this will kick off lru eviction
// start cancellation in parallel,
// allowing us to do meaningful work while waiting
let analysis_host = AssertUnwindSafe(&mut self.analysis_host);
s.spawn(thread::ThreadIntent::LatencySensitive, || {
{ analysis_host }.0.request_cancellation()
{ analysis_host }.0.trigger_cancellation()
});
// downgrade to read lock to allow more readers while we are normalizing text
@@ -440,14 +436,6 @@ impl GlobalState {
self.analysis_host.apply_change(change);
if self.revisions_until_next_gc == 0 {
// SAFETY: Just changed some database inputs, all queries were canceled.
unsafe { hir::collect_ty_garbage() };
self.revisions_until_next_gc = self.config.gc_freq();
} else {
self.revisions_until_next_gc -= 1;
}
if !modified_ratoml_files.is_empty()
|| !self.config.same_source_root_parent_map(&self.local_roots_parent_map)
{
@@ -741,7 +729,7 @@ impl GlobalState {
impl Drop for GlobalState {
fn drop(&mut self) {
self.analysis_host.request_cancellation();
self.analysis_host.trigger_cancellation();
}
}

View File

@@ -9,7 +9,7 @@ use std::{
};
use crossbeam_channel::{Receiver, never, select};
use ide_db::base_db::{SourceDatabase, VfsPath, salsa::Database as _};
use ide_db::base_db::{SourceDatabase, VfsPath};
use lsp_server::{Connection, Notification, Request};
use lsp_types::{TextDocumentIdentifier, notification::Notification as _};
use stdx::thread::ThreadIntent;
@@ -383,7 +383,7 @@ impl GlobalState {
));
}
PrimeCachesProgress::End { cancelled } => {
self.analysis_host.raw_database_mut().trigger_lru_eviction();
self.analysis_host.trigger_garbage_collection();
self.prime_caches_queue.op_completed(());
if cancelled {
self.prime_caches_queue
@@ -535,6 +535,11 @@ impl GlobalState {
if project_or_mem_docs_changed && self.config.test_explorer() {
self.update_tests();
}
// no work is currently being done, so we can block a bit and clean up our garbage
if self.task_pool.handle.is_empty() && self.fmt_pool.handle.is_empty() {
self.analysis_host.trigger_garbage_collection();
}
}
self.cleanup_discover_handles();
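Both call sites added in this file reduce to the same operation; a condensed, non-verbatim view (match arms and handler plumbing elided):

// after cache priming ends: priming just populated a large number of memos,
// so evict and sweep right away
PrimeCachesProgress::End { .. } => self.analysis_host.trigger_garbage_collection(),

// at the tail of every handled event: collect only when quiescent, i.e. when
// neither the task pool nor the formatting pool has queued or running work
if self.task_pool.handle.is_empty() && self.fmt_pool.handle.is_empty() {
    self.analysis_host.trigger_garbage_collection();
}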

View File

@@ -43,6 +43,10 @@ impl<T> TaskPool<T> {
pub(crate) fn len(&self) -> usize {
self.pool.len()
}
pub(crate) fn is_empty(&self) -> bool {
self.pool.is_empty()
}
}
/// `DeferredTaskQueue` holds deferred tasks.

View File

@@ -66,7 +66,6 @@ impl Pool {
job.requested_intent.apply_to_current_thread();
current_intent = job.requested_intent;
}
extant_tasks.fetch_add(1, Ordering::SeqCst);
// discard the panic, we should've logged the backtrace already
drop(panic::catch_unwind(job.f));
extant_tasks.fetch_sub(1, Ordering::SeqCst);
@@ -93,6 +92,7 @@ impl Pool {
});
let job = Job { requested_intent: intent, f };
self.extant_tasks.fetch_add(1, Ordering::SeqCst);
self.job_sender.send(job).unwrap();
}
@@ -147,6 +147,7 @@ impl<'scope> Scope<'_, 'scope> {
>(f)
},
};
self.pool.extant_tasks.fetch_add(1, Ordering::SeqCst);
self.pool.job_sender.send(job).unwrap();
}
}
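The counter move in this file is what makes the main loop's quiescence check trustworthy: extant_tasks is now bumped when a job is submitted (in both spawn paths) rather than when a worker picks it up, so len()/is_empty() also account for jobs still sitting in the channel. A self-contained toy model of that accounting, not the actual stdx pool:

use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::{Arc, mpsc};
use std::thread;

fn main() {
    // the job is counted from the moment it is queued until it has finished,
    // so a count of zero really means "nothing queued and nothing running"
    let extant_tasks = Arc::new(AtomicUsize::new(0));
    let (sender, receiver) = mpsc::channel::<Box<dyn FnOnce() + Send>>();

    let worker_count = Arc::clone(&extant_tasks);
    let worker = thread::spawn(move || {
        for job in receiver {
            job();
            // decrement only after the job has run to completion
            worker_count.fetch_sub(1, Ordering::SeqCst);
        }
    });

    // submission side: count the job *before* it enters the channel; counting
    // on the worker side would make a queued-but-unstarted job look like idleness
    extant_tasks.fetch_add(1, Ordering::SeqCst);
    sender.send(Box::new(|| println!("doing some work"))).unwrap();

    // an illustrative quiescence check like the one the main loop now performs
    if extant_tasks.load(Ordering::SeqCst) == 0 {
        println!("quiescent: safe to collect garbage");
    } else {
        println!("work pending: skip garbage collection for now");
    }

    drop(sender);
    worker.join().unwrap();
}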

View File

@@ -635,17 +635,6 @@
Controls file watching implementation.
## rust-analyzer.gc.frequency {#gc.frequency}
Default: `1000`
This config controls the frequency in which rust-analyzer will perform its internal Garbage
Collection. It is specified in revisions, roughly equivalent to number of changes. The default
is 1000.
Setting a smaller value may help limit peak memory usage at the expense of speed.
## rust-analyzer.gotoImplementations.filterAdjacentDerives {#gotoImplementations.filterAdjacentDerives}
Default: `false`

View File

@@ -1632,17 +1632,6 @@
}
}
},
{
"title": "Gc",
"properties": {
"rust-analyzer.gc.frequency": {
"markdownDescription": "This config controls the frequency in which rust-analyzer will perform its internal Garbage\nCollection. It is specified in revisions, roughly equivalent to number of changes. The default\nis 1000.\n\nSetting a smaller value may help limit peak memory usage at the expense of speed.",
"default": 1000,
"type": "integer",
"minimum": 0
}
}
},
{
"title": "Goto Implementations",
"properties": {