Mirror of https://github.com/tokio-rs/tokio.git (synced 2025-10-01 12:20:39 +00:00)
fix more bugz

parent fab5adc17c
commit e6a1444fac
@@ -14,6 +14,12 @@ pub(crate) struct Driver {
    inner: TimeDriver,
}

impl Drop for Driver {
    fn drop(&mut self) {
        println!(" + DROP DRIVER");
    }
}

#[derive(Debug)]
pub(crate) struct Handle {
    /// IO driver handle
@@ -126,8 +126,7 @@ impl Idle {
                shared.condvars[worker].notify_one();
                return;
            } else {
                // synced.idle.sleepers.push(worker);
                panic!("[tokio] unexpected condition");
                synced.idle.sleepers.push(worker);
            }
        }
@@ -150,7 +149,7 @@ impl Idle {
        workers: &mut Vec<usize>,
        num: usize,
    ) {
        let mut did_notify = false;
        debug_assert!(workers.is_empty());

        for _ in 0..num {
            if let Some(worker) = synced.idle.sleepers.pop() {
@@ -160,18 +159,17 @@ impl Idle {
                    synced.assigned_cores[worker] = Some(core);

                    workers.push(worker);
                    did_notify = true;

                    continue;
                } else {
                    panic!("[tokio] unexpected condition");
                    synced.idle.sleepers.push(worker);
                }
            }

            break;
        }

        if did_notify {
        if !workers.is_empty() {
            let num_idle = synced.idle.available_cores.len();
            self.num_idle.store(num_idle, Release);
        } else {
@@ -184,6 +182,7 @@ impl Idle {
    }

    pub(super) fn shutdown(&self, synced: &mut worker::Synced, shared: &Shared) {
        println!(" + start shutdown");
        // First, set the shutdown flag on each core
        for core in &mut synced.idle.available_cores {
            core.is_shutdown = true;
@@ -199,6 +198,8 @@ impl Idle {
            synced.assigned_cores[worker] = Some(core);
            shared.condvars[worker].notify_one();

            println!(" + notify worker shutdown w/ core");

            self.num_idle
                .store(synced.idle.available_cores.len(), Release);
        }
@@ -206,6 +207,7 @@ impl Idle {
        // Wake up any other workers
        while let Some(index) = synced.idle.sleepers.pop() {
            shared.condvars[index].notify_one();
            println!(" + notify worker shutdown NO core");
        }
    }
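The idle/shutdown hunks above hand a core to a sleeping worker by writing into `synced.assigned_cores[worker]` and then signalling that worker's condvar. The following is a rough, self-contained sketch of that handoff pattern using simplified, hypothetical types; it is not tokio's actual code.

```rust
use std::sync::{Arc, Condvar, Mutex};
use std::thread;

struct Core {
    is_shutdown: bool,
}

struct Synced {
    assigned_cores: Vec<Option<Core>>,
}

struct Shared {
    synced: Mutex<Synced>,
    condvars: Vec<Condvar>,
}

fn wait_for_core(shared: &Shared, worker: usize) -> Core {
    let mut synced = shared.synced.lock().unwrap();
    loop {
        // A core may already have been assigned before this worker parked.
        if let Some(core) = synced.assigned_cores[worker].take() {
            return core;
        }
        // Otherwise sleep until this worker's condvar is notified.
        synced = shared.condvars[worker].wait(synced).unwrap();
    }
}

fn main() {
    let shared = Arc::new(Shared {
        synced: Mutex::new(Synced {
            assigned_cores: vec![None],
        }),
        condvars: vec![Condvar::new()],
    });

    let s = Arc::clone(&shared);
    let handle = thread::spawn(move || wait_for_core(&s, 0));

    // Assign a shutdown-flagged core to worker 0 and wake it, mirroring the
    // shutdown path in the hunks above.
    {
        let mut synced = shared.synced.lock().unwrap();
        synced.assigned_cores[0] = Some(Core { is_shutdown: true });
        shared.condvars[0].notify_one();
    }

    assert!(handle.join().unwrap().is_shutdown);
}
```

The real scheduler also tracks sleeping and searching workers and batches notifications, which the sketch leaves out.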
@@ -107,6 +107,10 @@ pub(super) struct Core {
    /// Used to schedule bookkeeping tasks every so often.
    tick: u32,

    /// Counter used to track when to poll from the local queue vs. the
    /// injection queue
    num_seq_local_queue_polls: u32,

    /// When a task is scheduled from a worker, it is stored in this slot. The
    /// worker will check this slot for a task **before** checking the run
    /// queue. This effectively results in the **last** scheduled task to be run
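The (truncated) doc comment above describes the core's LIFO slot: the most recently scheduled task sits in a dedicated slot that is checked before the run queue. A minimal sketch of that poll order, using plain standard-library types rather than tokio's internals:

```rust
use std::collections::VecDeque;

struct Task(u32);

struct MiniCore {
    lifo_slot: Option<Task>,
    run_queue: VecDeque<Task>,
}

impl MiniCore {
    fn schedule(&mut self, task: Task) {
        // The newest task displaces the previous slot occupant, which falls
        // back onto the FIFO run queue.
        if let Some(prev) = self.lifo_slot.replace(task) {
            self.run_queue.push_back(prev);
        }
    }

    fn next_task(&mut self) -> Option<Task> {
        // Check the slot *before* the run queue, so the most recently
        // scheduled task runs first.
        self.lifo_slot.take().or_else(|| self.run_queue.pop_front())
    }
}

fn main() {
    let mut core = MiniCore {
        lifo_slot: None,
        run_queue: VecDeque::new(),
    };
    core.schedule(Task(1));
    core.schedule(Task(2));
    assert_eq!(core.next_task().unwrap().0, 2); // the LIFO slot wins
    assert_eq!(core.next_task().unwrap().0, 1);
}
```

The usual motivation for the slot is that a task woken by the task currently running tends to be hot in cache, so running it immediately is often cheaper than queuing it behind everything else.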
@@ -141,6 +145,12 @@ pub(super) struct Core {
    rand: FastRand,
}

impl Drop for Core {
    fn drop(&mut self) {
        println!(" DROPPED CORE");
    }
}

/// State shared across all workers
pub(crate) struct Shared {
    /// Per-core remote state.
@@ -268,6 +278,7 @@ pub(super) fn create(
        cores.push(Box::new(Core {
            index: i,
            tick: 0,
            num_seq_local_queue_polls: 0,
            lifo_slot: None,
            lifo_enabled: !config.disable_lifo_slot,
            run_queue,
@@ -560,6 +571,7 @@ impl Worker {
            .idle
            .try_acquire_available_core(&mut synced.idle)
        {
            println!(" + acquired_core; {}", self.index);
            self.reset_acquired_core(cx, synced, &mut core);
            Some(core)
        } else {
@@ -569,6 +581,7 @@ impl Worker {

    // Block the current thread, waiting for an available core
    fn wait_for_core(&self, cx: &Context, mut synced: MutexGuard<'_, Synced>) -> NextTaskResult {
        println!(" + wait_for_core; {}", self.index);
        cx.shared()
            .idle
            .transition_worker_to_parked(&mut synced, self.index);
@@ -582,6 +595,7 @@ impl Worker {
        // If shutting down, abort
        if cx.shared().inject.is_closed(&synced.inject) {
            self.shutdown_clear_defer(cx);
            println!(" + wait_for_core; shutdown; {}", self.index);
            return Err(());
        }
@@ -591,19 +605,22 @@ impl Worker {
        self.reset_acquired_core(cx, &mut synced, &mut core);

        if core.is_shutdown {
            println!(" + wait_for_core; CORE(shutdown) {}", self.index);
            // Currently shutting down, don't do any more work
            return Ok((None, core));
        }

        // The core was notified to search for work, don't try to take tasks from the injection queue
        if core.is_searching {
            println!(" + wait_for_core; SEARCHING");
            return Ok((None, core));
        }

        // TODO: don't hardcode 128
        let n = core.run_queue.max_capacity() / 2;
        let maybe_task = self.next_remote_task_batch(cx, &mut synced, &mut core, n);

        println!(" + wait_for_core; task={:?}", maybe_task.is_some());

        Ok((maybe_task, core))
    }
@@ -666,7 +683,12 @@ impl Worker {
    }

    fn next_notified_task(&self, cx: &Context, core: &mut Core) -> Option<Notified> {
        if core.tick % core.global_queue_interval == 0 {
        core.num_seq_local_queue_polls += 1;

        if core.num_seq_local_queue_polls % core.global_queue_interval == 0 {
            core.num_seq_local_queue_polls = 0;

            println!(" + next_notified_task; REMOTE FIRST");
            // Update the global queue interval, if needed
            self.tune_global_queue_interval(cx, core);
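The hunk above polls the injection (global) queue first every `global_queue_interval` local-queue polls, tracked by `num_seq_local_queue_polls`, so remotely submitted tasks are not starved by a busy local queue. A simplified sketch of that policy with hypothetical queue types:

```rust
use std::collections::VecDeque;

struct Task(&'static str);

struct MiniWorker {
    local: VecDeque<Task>,
    injection: VecDeque<Task>,
    num_seq_local_queue_polls: u32,
    global_queue_interval: u32,
}

impl MiniWorker {
    fn next_task(&mut self) -> Option<Task> {
        self.num_seq_local_queue_polls += 1;

        if self.num_seq_local_queue_polls % self.global_queue_interval == 0 {
            self.num_seq_local_queue_polls = 0;
            // Periodically prefer the injection (global) queue.
            return self
                .injection
                .pop_front()
                .or_else(|| self.local.pop_front());
        }

        // Otherwise prefer the cheaper local queue.
        self.local.pop_front().or_else(|| self.injection.pop_front())
    }
}

fn main() {
    let mut w = MiniWorker {
        local: VecDeque::from([Task("local-1"), Task("local-2")]),
        injection: VecDeque::from([Task("remote-1")]),
        num_seq_local_queue_polls: 0,
        global_queue_interval: 2,
    };
    // Poll 1 takes a local task; poll 2 hits the interval and takes the remote one.
    assert_eq!(w.next_task().unwrap().0, "local-1");
    assert_eq!(w.next_task().unwrap().0, "remote-1");
}
```

tokio additionally tunes `global_queue_interval` from runtime metrics (the `tune_global_queue_interval` call), which the sketch leaves out.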
@@ -873,7 +895,7 @@ impl Worker {
    fn schedule_deferred_with_core<'a>(
        &mut self,
        cx: &'a Context,
        core: Box<Core>,
        mut core: Box<Core>,
        synced: impl FnOnce() -> MutexGuard<'a, Synced>,
    ) -> NextTaskResult {
        let mut defer = cx.defer.borrow_mut();
@@ -909,6 +931,16 @@ impl Worker {
            cx.shared().condvars[worker].notify_one()
        }

        if !defer.is_empty() {
            // Push the rest of the tasks on the local queue
            for task in defer.drain(..) {
                core.run_queue
                    .push_back_or_overflow(task, cx.shared(), &mut core.stats);
            }

            cx.shared().notify_parked_local();
        }

        Ok((task, core))
    }
@@ -962,18 +994,27 @@ impl Worker {
    }

    fn park_yield(&mut self, cx: &Context, mut core: Box<Core>) -> NextTaskResult {
        println!(" + park_yield; tick={}", core.tick);
        let mut maybe_task = None;

        // Call `park` with a 0 timeout. This enables the I/O driver, timer, ...
        // to run without actually putting the thread to sleep.
        if let Some(mut driver) = cx.shared().driver.take() {
            driver.park_timeout(&cx.handle.driver, Duration::from_millis(0));

            cx.shared().driver.set(driver);

            // If there are more I/O events, schedule them.
            core = n!(self.schedule_deferred_with_core(cx, core, || cx.shared().synced.lock()));
            let res = self.schedule_deferred_with_core(cx, core, || cx.shared().synced.lock())?;

            maybe_task = res.0;
            core = res.1;
        }

        self.flush_metrics(cx, &mut core);
        self.update_global_flags(cx, &mut cx.shared().synced.lock(), &mut core);
        Ok((None, core))

        Ok((maybe_task, core))
    }

    fn park(&mut self, cx: &Context, mut core: Box<Core>) -> NextTaskResult {
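The comment in `park_yield` above explains that parking with a 0 timeout lets the I/O driver and timer dispatch ready events without putting the worker thread to sleep. For illustration only, the same idea expressed directly against mio (which tokio's I/O driver is built on); this is not the driver's actual code:

```rust
use std::time::Duration;

use mio::{Events, Poll};

fn main() -> std::io::Result<()> {
    let mut poll = Poll::new()?;
    let mut events = Events::with_capacity(128);

    // A zero timeout dispatches any events that are already ready and
    // returns immediately, so the calling thread never goes to sleep.
    poll.poll(&mut events, Some(Duration::from_millis(0)))?;

    // A `None` timeout would instead block until at least one event arrives,
    // which corresponds to a real `park`.
    println!("ready events: {}", events.iter().count());
    Ok(())
}
```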
@@ -1050,8 +1091,11 @@ impl Worker {
        // Drop the lock before parking on the driver
        drop(synced);

        println!(" + driver::park");

        // Wait for driver events
        driver.park(&self.handle.driver);
        println!(" + driver::park / done");

        synced = cx.shared().synced.lock();
@@ -1123,16 +1167,22 @@ impl Worker {
    }

    fn shutdown_finalize(&self, cx: &Context, mut synced: MutexGuard<'_, Synced>) {
        println!(" + shutdown core");

        // Wait for all cores
        if synced.shutdown_cores.len() != cx.shared().remotes.len() {
            return;
        }

        println!(" + shutdown_finalize; all cores");

        // Wait for driver
        if cx.shared().driver.is_none() {
            return;
        }

        println!(" + shutdown_finalize; have driver");

        debug_assert!(cx.shared().owned.is_empty());

        for mut core in synced.shutdown_cores.drain(..) {