diff --git a/Cargo.toml b/Cargo.toml
index c538acdc..92ec2954 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -35,6 +35,9 @@ atomic-polyfill = { version = "0.1.2", optional = true }
 [dependencies]
 hash32 = "0.2.1"
 
+[target.'cfg(target_arch = "x86_64")'.dependencies]
+spin = "0.9.2"
+
 [dependencies.serde]
 version = "1"
 optional = true
diff --git a/src/pool/cas.rs b/src/pool/cas.rs
index b80c2d85..09781e1a 100644
--- a/src/pool/cas.rs
+++ b/src/pool/cas.rs
@@ -87,9 +87,26 @@ impl<T> Stack<T> {
 }
 
 #[cfg(target_arch = "x86_64")]
-fn anchor<T>() -> *mut T {
-    static mut ANCHOR: u8 = 0;
-    (unsafe { &mut ANCHOR } as *mut u8 as usize & !(core::mem::align_of::<T>() - 1)) as *mut T
+fn anchor<T>(init: Option<*mut T>) -> *mut T {
+    use core::sync::atomic::AtomicU8;
+
+    use spin::Once;
+
+    static LAZY_ANCHOR: Once<usize> = Once::new();
+
+    let likely_unaligned_address = if let Some(init) = init {
+        *LAZY_ANCHOR.call_once(|| init as usize)
+    } else {
+        LAZY_ANCHOR.get().copied().unwrap_or_else(|| {
+            // we may hit this branch with Pool of ZSTs where `grow` does not need to be called
+            static BSS_ANCHOR: AtomicU8 = AtomicU8::new(0);
+            &BSS_ANCHOR as *const _ as usize
+        })
+    };
+
+    let alignment_mask = !(core::mem::align_of::<T>() - 1);
+    let well_aligned_address = likely_unaligned_address & alignment_mask;
+    well_aligned_address as *mut T
 }
 
 /// On x86_64, anchored pointer. This is a (signed) 32-bit offset from `anchor` plus a 32-bit tag
@@ -116,7 +133,7 @@ impl<T> Ptr<T> {
     pub fn new(p: *mut T) -> Option<Self> {
         use core::convert::TryFrom;
 
-        i32::try_from((p as isize).wrapping_sub(anchor::<T>() as isize))
+        i32::try_from((p as isize).wrapping_sub(anchor::<T>(Some(p)) as isize))
             .ok()
             .map(|offset| unsafe { Ptr::from_parts(initial_tag_value(), offset) })
     }
@@ -166,7 +183,7 @@ impl<T> Ptr<T> {
     fn as_raw(&self) -> NonNull<T> {
         unsafe {
             NonNull::new_unchecked(
-                (anchor::<T>() as *mut u8).offset(self.offset() as isize) as *mut T
+                (anchor::<T>(None) as *mut u8).offset(self.offset() as isize) as *mut T,
            )
         }
     }
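
Note (illustrative, not part of the patch): the new `anchor` above lazily latches the address of the first pointer handed to `Ptr::new` via `spin::Once`, instead of deriving the anchor from a `static` in .bss, which presumably keeps offsets of pool nodes relative to the anchor within `i32` range as required by the 32-bit offset encoding documented on `Ptr`. The sketch below shows that lazy-latch idea in isolation; `latch_anchor` and `offset_from_anchor` are hypothetical names for illustration only (assumes Rust 2021 and spin 0.9, the dependency added by the patch), not APIs of this crate.

use spin::Once;

// Lazily initialized anchor address; the first caller decides its value.
static ANCHOR: Once<usize> = Once::new();

/// Latch the first observed pointer as the anchor; later calls return the same value.
fn latch_anchor(p: *mut u8) -> usize {
    *ANCHOR.call_once(|| p as usize)
}

/// Offset of `p` relative to the latched anchor, if it fits in an `i32`.
fn offset_from_anchor(p: *mut u8) -> Option<i32> {
    i32::try_from((p as isize).wrapping_sub(latch_anchor(p) as isize)).ok()
}

fn main() {
    let mut buffer = [0u8; 64];
    let first = buffer.as_mut_ptr();
    // The first pointer becomes the anchor, so its own offset is 0 ...
    assert_eq!(offset_from_anchor(first), Some(0));
    // ... and nearby pointers get small offsets that fit in 32 bits.
    assert_eq!(offset_from_anchor(unsafe { first.add(8) }), Some(8));
}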