Mirror of https://github.com/rust-embedded/heapless.git (synced 2025-10-02 14:54:30 +00:00)
add comments indicating how compiler fences affect reordering of memory ops
commit e1195dff92 (parent 6e521fad24)
@@ -56,8 +56,8 @@ unsafe impl Uxx for u8 {
         if C::is_multi_core() {
             (*(x as *const AtomicU8)).load(Ordering::Acquire)
         } else {
-            let y = (*(x as *const AtomicU8)).load(Ordering::Relaxed);
-            atomic::compiler_fence(Ordering::Acquire);
+            let y = (*(x as *const AtomicU8)).load(Ordering::Relaxed); // read
+            atomic::compiler_fence(Ordering::Acquire); // ▼
             y
         }
     }
@@ -73,8 +73,8 @@ unsafe impl Uxx for u8 {
         if C::is_multi_core() {
             (*(x as *const AtomicU8)).store(val, Ordering::Release)
         } else {
-            atomic::compiler_fence(Ordering::Release);
-            (*(x as *const AtomicU8)).store(val, Ordering::Relaxed)
+            atomic::compiler_fence(Ordering::Release); // ▲
+            (*(x as *const AtomicU8)).store(val, Ordering::Relaxed) // write
         }
     }
 }
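Note: the ▼ / ▲ markers added in this commit indicate the direction in which each compiler fence blocks reordering. An Acquire fence placed after the relaxed load (▼) prevents memory operations written after it from being moved above the load; a Release fence placed before the relaxed store (▲) prevents memory operations written before it from being moved below the store. A minimal standalone sketch of the single-core pattern (the function names here are illustrative, not part of the crate):

use std::sync::atomic::{compiler_fence, AtomicU8, Ordering};

// Load with acquire semantics on a single-core target: the plain load is
// Relaxed, and the Acquire compiler fence (the `// ▼` line in the diff)
// keeps memory operations written after it from being moved above the load.
fn load_acquire_single_core(x: &AtomicU8) -> u8 {
    let y = x.load(Ordering::Relaxed); // read
    compiler_fence(Ordering::Acquire); // ▼ later ops stay below this point
    y
}

// Store with release semantics on a single-core target: the Release compiler
// fence (the `// ▲` line in the diff) keeps memory operations written before
// it from being moved below the store.
fn store_release_single_core(x: &AtomicU8, val: u8) {
    compiler_fence(Ordering::Release); // ▲ earlier ops stay above this point
    x.store(val, Ordering::Relaxed); // write
}

fn main() {
    let flag = AtomicU8::new(0);
    store_release_single_core(&flag, 1);
    assert_eq!(load_acquire_single_core(&flag), 1);
}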
@@ -97,8 +97,8 @@ unsafe impl Uxx for u16 {
         if C::is_multi_core() {
             (*(x as *const AtomicU16)).load(Ordering::Acquire)
         } else {
-            let y = (*(x as *const AtomicU16)).load(Ordering::Relaxed);
-            atomic::compiler_fence(Ordering::Acquire);
+            let y = (*(x as *const AtomicU16)).load(Ordering::Relaxed); // read
+            atomic::compiler_fence(Ordering::Acquire); // ▼
             y
         }
     }
@@ -114,8 +114,8 @@ unsafe impl Uxx for u16 {
         if C::is_multi_core() {
             (*(x as *const AtomicU16)).store(val, Ordering::Release)
         } else {
-            atomic::compiler_fence(Ordering::Release);
-            (*(x as *const AtomicU16)).store(val, Ordering::Relaxed)
+            atomic::compiler_fence(Ordering::Release); // ▲
+            (*(x as *const AtomicU16)).store(val, Ordering::Relaxed) // write
         }
     }
 }
@@ -132,8 +132,8 @@ unsafe impl Uxx for usize {
         if C::is_multi_core() {
             (*(x as *const AtomicUsize)).load(Ordering::Acquire)
         } else {
-            let y = (*(x as *const AtomicUsize)).load(Ordering::Relaxed);
-            atomic::compiler_fence(Ordering::Acquire);
+            let y = (*(x as *const AtomicUsize)).load(Ordering::Relaxed); // read
+            atomic::compiler_fence(Ordering::Acquire); // ▼
             y
         }
     }
@@ -149,8 +149,8 @@ unsafe impl Uxx for usize {
         if C::is_multi_core() {
             (*(x as *const AtomicUsize)).store(val, Ordering::Release)
         } else {
-            atomic::compiler_fence(Ordering::Release);
-            (*(x as *const AtomicUsize)).store(val, Ordering::Relaxed);
+            atomic::compiler_fence(Ordering::Release); // ▲
+            (*(x as *const AtomicUsize)).store(val, Ordering::Relaxed); // write
         }
     }
 }
@@ -78,18 +78,18 @@ macro_rules! impl_ {
             /// Returns if there are any items to dequeue. When this returns true, at least the
             /// first subsequent dequeue will succeed.
             pub fn ready(&self) -> bool {
-                let tail = unsafe { self.rb.as_ref().tail.load_acquire() };
                 let head = unsafe { self.rb.as_ref().head.load_relaxed() };
+                let tail = unsafe { self.rb.as_ref().tail.load_acquire() }; // ▼
                 return head != tail;
             }
 
             /// Returns the item in the front of the queue, or `None` if the queue is empty
             pub fn dequeue(&mut self) -> Option<T> {
-                let tail = unsafe { self.rb.as_ref().tail.load_acquire() };
                 let head = unsafe { self.rb.as_ref().head.load_relaxed() };
+                let tail = unsafe { self.rb.as_ref().tail.load_acquire() }; // ▼
 
                 if head != tail {
-                    Some(unsafe { self._dequeue(head) })
+                    Some(unsafe { self._dequeue(head) }) // ▲
                 } else {
                     None
                 }
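The arrows in the consumer code above mark one half of the acquire/release pairing between the two ends of the queue: the producer (further below) publishes `tail` with a release store only after writing the element, and the consumer reads the element only after the acquire load of `tail`, so observing the new `tail` also guarantees the element itself is visible. A standalone one-slot sketch of that handoff (names and layout are illustrative, not the crate's API):

use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Arc;
use std::thread;

// One-slot handoff mirroring the queue's ordering rules: the producer writes
// the payload, then publishes it with a Release store of the index; the
// consumer loads the index with Acquire, then reads the payload.
fn main() {
    let tail = Arc::new(AtomicUsize::new(0));
    let slot = Arc::new(AtomicUsize::new(0)); // stands in for one buffer slot

    let (t, s) = (tail.clone(), slot.clone());
    let producer = thread::spawn(move || {
        s.store(42, Ordering::Relaxed); // write the element first
        t.store(1, Ordering::Release);  // ▲ only then publish the new tail
    });

    // Consumer: read the slot only after the Acquire load has observed the
    // new tail; the Release/Acquire pair makes the payload write visible.
    while tail.load(Ordering::Acquire) == 0 {} // ▼
    assert_eq!(slot.load(Ordering::Relaxed), 42);

    producer.join().unwrap();
}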
@@ -103,7 +103,7 @@ macro_rules! impl_ {
             pub unsafe fn dequeue_unchecked(&mut self) -> T {
                 let head = self.rb.as_ref().head.load_relaxed();
                 debug_assert_ne!(head, self.rb.as_ref().tail.load_acquire());
-                self._dequeue(head)
+                self._dequeue(head) // ▲
             }
 
             unsafe fn _dequeue(&mut self, head: $uxx) -> T {
@@ -113,7 +113,7 @@ macro_rules! impl_ {
                 let buffer = rb.buffer.get_ref();
 
                 let item = ptr::read(buffer.get_unchecked(usize::from(head % cap)));
-                rb.head.store_release(head.wrapping_add(1));
+                rb.head.store_release(head.wrapping_add(1)); // ▲
                 item
             }
         }
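The ▲ on `store_release` in `_dequeue` encodes the other consumer-side requirement: the element must be read out of the buffer before the new `head` is published, because that store is what tells the producer the slot may be reused. A self-contained sketch of that ordering (the `dequeue_slot` helper and buffer layout are hypothetical, not the crate's code):

use std::mem::MaybeUninit;
use std::ptr;
use std::sync::atomic::{AtomicUsize, Ordering};

// Read the slot first, then release the slot by advancing `head`; reversing
// these two steps would let the producer overwrite data still being read.
unsafe fn dequeue_slot<T>(buffer: &[MaybeUninit<T>], head: &AtomicUsize) -> T {
    let h = head.load(Ordering::Relaxed);
    let cap = buffer.len();
    let item = ptr::read(buffer[h % cap].as_ptr());    // read the element first
    head.store(h.wrapping_add(1), Ordering::Release);  // ▲ only then free the slot
    item
}

fn main() {
    let mut buffer: [MaybeUninit<u32>; 4] = [MaybeUninit::uninit(); 4];
    buffer[0] = MaybeUninit::new(7);
    let head = AtomicUsize::new(0);
    let item = unsafe { dequeue_slot(&buffer, &head) };
    assert_eq!(item, 7);
    assert_eq!(head.load(Ordering::Relaxed), 1);
}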
@@ -149,12 +149,12 @@ macro_rules! impl_ {
                 // to the C++ memory model, which is what Rust currently uses, so we err on the side
                 // of caution and stick to `load_acquire`. Check issue google#sanitizers#882 for
                 // more details.
-                let head = unsafe { self.rb.as_ref().head.load_acquire() };
+                let head = unsafe { self.rb.as_ref().head.load_acquire() }; // ▼
 
                 if tail.wrapping_sub(head) > cap - 1 {
                     Err(item)
                 } else {
-                    unsafe { self._enqueue(tail, item) };
+                    unsafe { self._enqueue(tail, item) }; // ▲
                     Ok(())
                 }
             }
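For reference, the fullness check `tail.wrapping_sub(head) > cap - 1` relies on `head` and `tail` being monotonically increasing counters that wrap at the integer width (with a power-of-two capacity), so their wrapping difference is the current element count. A small worked example with u8 indices (the concrete values are illustrative):

// Worked example of the fullness check with wrapping u8 indices.
fn main() {
    let cap: u8 = 4;

    // No wrap: head = 10, tail = 13 -> 3 elements, not full.
    let (head, tail): (u8, u8) = (10, 13);
    assert_eq!(tail.wrapping_sub(head), 3);
    assert!(!(tail.wrapping_sub(head) > cap - 1));

    // Wrapped: head = 254, tail = 2 (i.e. 256 + 2) -> 4 elements, full.
    let (head, tail): (u8, u8) = (254, 2);
    assert_eq!(tail.wrapping_sub(head), 4);
    assert!(tail.wrapping_sub(head) > cap - 1);
}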
@@ -170,7 +170,7 @@ macro_rules! impl_ {
             pub unsafe fn enqueue_unchecked(&mut self, item: T) {
                 let tail = self.rb.as_ref().tail.load_relaxed();
                 debug_assert_ne!(tail.wrapping_add(1), self.rb.as_ref().head.load_acquire());
-                self._enqueue(tail, item);
+                self._enqueue(tail, item); // ▲
             }
 
             unsafe fn _enqueue(&mut self, tail: $uxx, item: T) {
@@ -183,7 +183,7 @@ macro_rules! impl_ {
                 // uninitialized. We use `ptr::write` to avoid running `T`'s destructor on the
                 // uninitialized memory
                 ptr::write(buffer.get_unchecked_mut(usize::from(tail % cap)), item);
-                rb.tail.store_release(tail.wrapping_add(1));
+                rb.tail.store_release(tail.wrapping_add(1)); // ▲
             }
         }
     };
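For context, the annotated methods back the spsc queue's split Producer/Consumer halves. A rough usage sketch, assuming the heapless 0.4-era API (typenum capacities via `heapless::consts`; the exact constructor and type parameters may differ between releases):

use heapless::consts::U4;
use heapless::spsc::Queue;

fn main() {
    // Single-producer single-consumer queue with capacity 4.
    let mut queue: Queue<u32, U4> = Queue::new();
    let (mut producer, mut consumer) = queue.split();

    // Producer side: `enqueue` uses the Release-ordered tail update shown above.
    producer.enqueue(1).unwrap();

    // Consumer side: `ready`/`dequeue` use the Acquire-ordered tail load.
    assert!(consumer.ready());
    assert_eq!(consumer.dequeue(), Some(1));
}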