Pad fields to cacheline size to avoid false sharing (#475)

Stjepan Glavina 2018-07-16 23:22:48 +02:00 committed by Carl Lerche
parent 6ba8e7621d
commit c17ecb53e7
6 changed files with 15 additions and 9 deletions
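For context, a minimal sketch of the padding technique this commit applies; it is not part of the diff, and the struct and field names are hypothetical. `CachePadded<T>` from crossbeam-utils 0.4 (the dependency added below) aligns and pads `T` out to a full cache line, so two hot fields declared next to each other can no longer share a line. Without padding, a write to one field invalidates the other field's line in every other core's cache even though the fields are logically independent; that is the false sharing the commit title refers to.

extern crate crossbeam_utils; // crossbeam-utils = "0.4", per the Cargo.toml change below

use std::sync::atomic::AtomicUsize;
use std::sync::atomic::Ordering::Relaxed;

use crossbeam_utils::cache_padded::CachePadded;

// Hypothetical struct, not from this commit: two counters that different
// threads update constantly. Unpadded, `enqueued` and `dequeued` would
// likely share one cache line; `CachePadded` gives each its own.
struct Counters {
    enqueued: CachePadded<AtomicUsize>,
    dequeued: CachePadded<AtomicUsize>,
}

fn main() {
    let c = Counters {
        enqueued: CachePadded::new(AtomicUsize::new(0)),
        dequeued: CachePadded::new(AtomicUsize::new(0)),
    };

    // `CachePadded<T>` derefs to `T`, so reads and writes are unchanged;
    // only construction needs the `CachePadded::new(..)` wrapper.
    c.enqueued.fetch_add(1, Relaxed);
    c.dequeued.fetch_add(1, Relaxed);

    println!("CachePadded<AtomicUsize> occupies {} bytes",
             std::mem::size_of::<CachePadded<AtomicUsize>>());
}

Because `CachePadded<T>` dereferences to `T`, existing loads and stores compile unchanged; only field declarations and constructors need touching, which is exactly the shape of the diff below.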

View File

@@ -85,7 +85,7 @@ impl super::Executor for DefaultExecutor {
         EXECUTOR.with(|current_executor| {
             match current_executor.get() {
                 Some(executor) => {
-                    let executor = unsafe { &mut *executor };
+                    let executor = unsafe { &*executor };
                     executor.status()
                 }
                 None => {

View File

@@ -20,6 +20,7 @@ categories = ["concurrency", "asynchronous"]
 tokio-executor = { version = "0.1.2", path = "../tokio-executor" }
 futures = "0.1.19"
 crossbeam-deque = "0.5.0"
+crossbeam-utils = "0.4.1"
 num_cpus = "1.2"
 rand = "0.5"
 log = "0.4"

View File

@@ -116,6 +116,7 @@
 extern crate tokio_executor;

 extern crate crossbeam_deque as deque;
+extern crate crossbeam_utils;
 #[macro_use]
 extern crate futures;
 extern crate num_cpus;

View File

@@ -28,6 +28,7 @@ use std::sync::atomic::AtomicUsize;
 use std::sync::Arc;
 use std::thread;

+use crossbeam_utils::cache_padded::CachePadded;
 use rand;

 #[derive(Debug)]
@@ -40,10 +41,10 @@ pub(crate) struct Pool {
     //
     // The value of this atomic is deserialized into a `pool::State` instance.
     // See comments for that type.
-    pub state: AtomicUsize,
+    pub state: CachePadded<AtomicUsize>,

     // Stack tracking sleeping workers.
-    sleep_stack: worker::Stack,
+    sleep_stack: CachePadded<worker::Stack>,

     // Number of workers that haven't reached the final state of shutdown
     //
@@ -107,8 +108,8 @@ impl Pool {
         let blocking = Blocking::new(max_blocking);

         let ret = Pool {
-            state: AtomicUsize::new(State::new().into()),
-            sleep_stack: worker::Stack::new(),
+            state: CachePadded::new(AtomicUsize::new(State::new().into())),
+            sleep_stack: CachePadded::new(worker::Stack::new()),
             num_workers: AtomicUsize::new(0),
             workers,
             backup,

View File

@@ -6,12 +6,14 @@ use std::sync::Arc;
 use std::sync::atomic::AtomicPtr;
 use std::sync::atomic::Ordering::{Acquire, Release, AcqRel, Relaxed};

+use crossbeam_utils::cache_padded::CachePadded;
+
 #[derive(Debug)]
 pub(crate) struct Queue {
     /// Queue head.
     ///
     /// This is a strong reference to `Task` (i.e, `Arc<Task>`)
-    head: AtomicPtr<Task>,
+    head: CachePadded<AtomicPtr<Task>>,

     /// Tail pointer. This is `Arc<Task>` unless it points to `stub`.
     tail: UnsafeCell<*mut Task>,
@@ -37,7 +39,7 @@ impl Queue {
         let ptr = &*stub as *const _ as *mut _;

         Queue {
-            head: AtomicPtr::new(ptr),
+            head: CachePadded::new(AtomicPtr::new(ptr)),
             tail: UnsafeCell::new(ptr),
             stub: stub,
         }

View File

@@ -8,6 +8,7 @@ use std::sync::Arc;
 use std::sync::atomic::{AtomicUsize, Ordering};
 use std::sync::atomic::Ordering::{Acquire, AcqRel, Relaxed};

+use crossbeam_utils::cache_padded::CachePadded;
 use deque;

 // TODO: None of the fields should be public
@@ -19,7 +20,7 @@ pub(crate) struct WorkerEntry {
     //
     // The `usize` value is deserialized to a `worker::State` instance. See
    // comments on that type.
-    pub state: AtomicUsize,
+    pub state: CachePadded<AtomicUsize>,

     // Next entry in the parked Trieber stack
     next_sleeper: UnsafeCell<usize>,
@@ -45,7 +46,7 @@ impl WorkerEntry {
         let (w, s) = deque::fifo();

         WorkerEntry {
-            state: AtomicUsize::new(State::default().into()),
+            state: CachePadded::new(AtomicUsize::new(State::default().into())),
             next_sleeper: UnsafeCell::new(0),
             worker: w,
             stealer: s,