remove the spsc / pool / mpmc modules on targets without atomic / CAS support

closes #123
This commit is contained in:
Jorge Aparicio 2019-12-17 17:01:40 +01:00
parent 909251de32
commit 44c66a7484
9 changed files with 232 additions and 160 deletions

View File

@ -19,5 +19,26 @@ fn main() -> Result<(), Box<dyn Error>> {
println!("cargo:rustc-cfg=armv8m_main");
}
// built-in targets with no atomic / CAS support as of nightly-2019-12-17
// see the `no-atomics.sh` / `no-cas.sh` script sitting next to this file
match &target[..] {
"thumbv6m-none-eabi"
| "msp430-none-elf"
| "riscv32i-unknown-none-elf"
| "riscv32imc-unknown-none-elf" => {}
_ => {
println!("cargo:rustc-cfg=has_cas");
}
};
match &target[..] {
"msp430-none-elf" | "riscv32i-unknown-none-elf" | "riscv32imc-unknown-none-elf" => {}
_ => {
println!("cargo:rustc-cfg=has_atomics");
}
};
Ok(())
}

14
no-atomics.sh Normal file
View File

@ -0,0 +1,14 @@
#!/bin/bash

# Lists every built-in rustc target that has NO atomic load/store support,
# i.e. targets whose `--print cfg` output lacks `target_has_atomic_load_store`.
# The output is used to keep the target match in `build.rs` up to date.

set -euo pipefail

main() {
    # Split the target list on newlines only.
    IFS='
'

    # Query the target list from the same nightly toolchain that is probed
    # below; using the default toolchain here could list a different set of
    # targets than nightly knows about, skipping or mis-reporting some.
    for t in $(rustc +nightly --print target-list); do
        # Print the target when the atomic load/store cfg is absent.
        # NOTE: `grep ... >/dev/null` rather than `grep -q` — with pipefail,
        # grep -q's early exit would SIGPIPE rustc and poison the pipeline status.
        rustc +nightly --print cfg --target "$t" | grep 'target_has_atomic_load_store=' >/dev/null || echo "$t"
    done
}

main

14
no-cas.sh Normal file
View File

@ -0,0 +1,14 @@
#!/bin/bash
set -euo pipefail
main() {
IFS='
'
for t in $(rustc --print target-list); do
rustc +nightly --print cfg --target $t | grep 'target_has_atomic=' >/dev/null || echo $t
done
}
main

View File

@ -2,6 +2,7 @@
use core::{marker::PhantomData, mem::MaybeUninit};
#[cfg(has_atomics)]
use crate::spsc::{Atomic, MultiCore};
/// `const-fn` version of [`BinaryHeap`](../binary_heap/struct.BinaryHeap.html)
@ -16,6 +17,7 @@ pub struct LinearMap<A> {
}
/// `const-fn` version of [`spsc::Queue`](../spsc/struct.Queue.html)
#[cfg(has_atomics)]
pub struct Queue<A, U = usize, C = MultiCore> {
// this is from where we dequeue items
pub(crate) head: Atomic<U, C>,

View File

@ -89,10 +89,11 @@ mod ser;
pub mod binary_heap;
pub mod i;
#[cfg(all(not(armv6m), feature = "cas"))]
#[cfg(all(has_cas, feature = "cas"))]
pub mod mpmc;
#[cfg(all(not(armv6m), feature = "cas"))]
#[cfg(all(has_cas, feature = "cas"))]
pub mod pool;
#[cfg(has_atomics)]
pub mod spsc;
mod sealed;

View File

@ -1,5 +1,7 @@
//! A fixed capacity Multiple-Producer Multiple-Consumer (MPMC) lock-free queue
//!
//! NOTE: This module is not available on targets that do *not* support CAS operations, e.g. ARMv6-M
//!
//! # Example
//!
//! This queue can be constructed in "const context". Placing it in a `static` variable lets *all*

View File

@ -1,5 +1,7 @@
//! A heap-less, interrupt-safe, lock-free memory pool (\*)
//!
//! NOTE: This module is not available on targets that do *not* support CAS operations, e.g. ARMv6-M
//!
//! (\*) Currently, the implementation is only lock-free *and* `Sync` on ARMv7-M devices
//!
//! # Examples

View File

@ -1,197 +1,210 @@
/// Sealed traits and implementations for `spsc`
pub mod spsc {
#[cfg(has_atomics)]
use crate::spsc::{MultiCore, SingleCore};
use core::sync::atomic::{self, AtomicU16, AtomicU8, AtomicUsize, Ordering};
use crate::spsc::{MultiCore, SingleCore};
#[cfg(has_atomics)]
use core::sync::atomic::{self, AtomicU16, AtomicU8, AtomicUsize, Ordering};
pub unsafe trait XCore {
fn is_multi_core() -> bool;
}
unsafe impl XCore for SingleCore {
fn is_multi_core() -> bool {
false
pub unsafe trait XCore {
fn is_multi_core() -> bool;
}
}
unsafe impl XCore for MultiCore {
fn is_multi_core() -> bool {
true
#[cfg(has_atomics)]
unsafe impl XCore for SingleCore {
fn is_multi_core() -> bool {
false
}
}
}
pub unsafe trait Uxx: Into<usize> + Send {
#[doc(hidden)]
fn saturate(x: usize) -> Self;
#[cfg(has_atomics)]
unsafe impl XCore for MultiCore {
fn is_multi_core() -> bool {
true
}
}
#[doc(hidden)]
fn truncate(x: usize) -> Self;
pub unsafe trait Uxx: Into<usize> + Send {
#[doc(hidden)]
fn saturate(x: usize) -> Self;
#[doc(hidden)]
unsafe fn load_acquire<C>(x: *const Self) -> Self
where
C: XCore;
#[doc(hidden)]
fn truncate(x: usize) -> Self;
#[doc(hidden)]
fn load_relaxed(x: *const Self) -> Self;
#[cfg(has_atomics)]
#[doc(hidden)]
unsafe fn load_acquire<C>(x: *const Self) -> Self
where
C: XCore;
#[doc(hidden)]
unsafe fn store_release<C>(x: *const Self, val: Self)
where
C: XCore;
}
#[cfg(has_atomics)]
#[doc(hidden)]
fn load_relaxed(x: *const Self) -> Self;
unsafe impl Uxx for u8 {
fn saturate(x: usize) -> Self {
let max = Self::max_value() as usize;
if x >= usize::from(max) {
max as Self
} else {
#[cfg(has_atomics)]
#[doc(hidden)]
unsafe fn store_release<C>(x: *const Self, val: Self)
where
C: XCore;
}
unsafe impl Uxx for u8 {
fn saturate(x: usize) -> Self {
let max = Self::max_value() as usize;
if x >= usize::from(max) {
max as Self
} else {
x as Self
}
}
fn truncate(x: usize) -> Self {
x as Self
}
}
fn truncate(x: usize) -> Self {
x as Self
}
#[cfg(has_atomics)]
unsafe fn load_acquire<C>(x: *const Self) -> Self
where
C: XCore,
{
if C::is_multi_core() {
(*(x as *const AtomicU8)).load(Ordering::Acquire)
} else {
let y = (*(x as *const AtomicU8)).load(Ordering::Relaxed); // read
atomic::compiler_fence(Ordering::Acquire); // ▼
y
}
}
unsafe fn load_acquire<C>(x: *const Self) -> Self
where
C: XCore,
{
if C::is_multi_core() {
(*(x as *const AtomicU8)).load(Ordering::Acquire)
} else {
let y = (*(x as *const AtomicU8)).load(Ordering::Relaxed); // read
atomic::compiler_fence(Ordering::Acquire); // ▼
y
#[cfg(has_atomics)]
fn load_relaxed(x: *const Self) -> Self {
unsafe { (*(x as *const AtomicU8)).load(Ordering::Relaxed) }
}
#[cfg(has_atomics)]
unsafe fn store_release<C>(x: *const Self, val: Self)
where
C: XCore,
{
if C::is_multi_core() {
(*(x as *const AtomicU8)).store(val, Ordering::Release)
} else {
atomic::compiler_fence(Ordering::Release); // ▲
(*(x as *const AtomicU8)).store(val, Ordering::Relaxed) // write
}
}
}
fn load_relaxed(x: *const Self) -> Self {
unsafe { (*(x as *const AtomicU8)).load(Ordering::Relaxed) }
}
unsafe fn store_release<C>(x: *const Self, val: Self)
where
C: XCore,
{
if C::is_multi_core() {
(*(x as *const AtomicU8)).store(val, Ordering::Release)
} else {
atomic::compiler_fence(Ordering::Release); // ▲
(*(x as *const AtomicU8)).store(val, Ordering::Relaxed) // write
unsafe impl Uxx for u16 {
fn saturate(x: usize) -> Self {
let max = Self::max_value() as usize;
if x >= usize::from(max) {
max as Self
} else {
x as Self
}
}
}
}
unsafe impl Uxx for u16 {
fn saturate(x: usize) -> Self {
let max = Self::max_value() as usize;
if x >= usize::from(max) {
max as Self
} else {
fn truncate(x: usize) -> Self {
x as Self
}
}
fn truncate(x: usize) -> Self {
x as Self
}
#[cfg(has_atomics)]
unsafe fn load_acquire<C>(x: *const Self) -> Self
where
C: XCore,
{
if C::is_multi_core() {
(*(x as *const AtomicU16)).load(Ordering::Acquire)
} else {
let y = (*(x as *const AtomicU16)).load(Ordering::Relaxed); // read
atomic::compiler_fence(Ordering::Acquire); // ▼
y
}
}
unsafe fn load_acquire<C>(x: *const Self) -> Self
where
C: XCore,
{
if C::is_multi_core() {
(*(x as *const AtomicU16)).load(Ordering::Acquire)
} else {
let y = (*(x as *const AtomicU16)).load(Ordering::Relaxed); // read
atomic::compiler_fence(Ordering::Acquire); // ▼
y
#[cfg(has_atomics)]
fn load_relaxed(x: *const Self) -> Self {
unsafe { (*(x as *const AtomicU16)).load(Ordering::Relaxed) }
}
#[cfg(has_atomics)]
unsafe fn store_release<C>(x: *const Self, val: Self)
where
C: XCore,
{
if C::is_multi_core() {
(*(x as *const AtomicU16)).store(val, Ordering::Release)
} else {
atomic::compiler_fence(Ordering::Release); // ▲
(*(x as *const AtomicU16)).store(val, Ordering::Relaxed) // write
}
}
}
fn load_relaxed(x: *const Self) -> Self {
unsafe { (*(x as *const AtomicU16)).load(Ordering::Relaxed) }
}
unsafe impl Uxx for usize {
fn saturate(x: usize) -> Self {
x
}
unsafe fn store_release<C>(x: *const Self, val: Self)
where
C: XCore,
{
if C::is_multi_core() {
(*(x as *const AtomicU16)).store(val, Ordering::Release)
} else {
atomic::compiler_fence(Ordering::Release); // ▲
(*(x as *const AtomicU16)).store(val, Ordering::Relaxed) // write
fn truncate(x: usize) -> Self {
x
}
#[cfg(has_atomics)]
unsafe fn load_acquire<C>(x: *const Self) -> Self
where
C: XCore,
{
if C::is_multi_core() {
(*(x as *const AtomicUsize)).load(Ordering::Acquire)
} else {
let y = (*(x as *const AtomicUsize)).load(Ordering::Relaxed); // read
atomic::compiler_fence(Ordering::Acquire); // ▼
y
}
}
#[cfg(has_atomics)]
fn load_relaxed(x: *const Self) -> Self {
unsafe { (*(x as *const AtomicUsize)).load(Ordering::Relaxed) }
}
#[cfg(has_atomics)]
unsafe fn store_release<C>(x: *const Self, val: Self)
where
C: XCore,
{
if C::is_multi_core() {
(*(x as *const AtomicUsize)).store(val, Ordering::Release)
} else {
atomic::compiler_fence(Ordering::Release); // ▲
(*(x as *const AtomicUsize)).store(val, Ordering::Relaxed); // write
}
}
}
}
unsafe impl Uxx for usize {
fn saturate(x: usize) -> Self {
x
}
fn truncate(x: usize) -> Self {
x
}
unsafe fn load_acquire<C>(x: *const Self) -> Self
where
C: XCore,
{
if C::is_multi_core() {
(*(x as *const AtomicUsize)).load(Ordering::Acquire)
} else {
let y = (*(x as *const AtomicUsize)).load(Ordering::Relaxed); // read
atomic::compiler_fence(Ordering::Acquire); // ▼
y
}
}
fn load_relaxed(x: *const Self) -> Self {
unsafe { (*(x as *const AtomicUsize)).load(Ordering::Relaxed) }
}
unsafe fn store_release<C>(x: *const Self, val: Self)
where
C: XCore,
{
if C::is_multi_core() {
(*(x as *const AtomicUsize)).store(val, Ordering::Release)
} else {
atomic::compiler_fence(Ordering::Release); // ▲
(*(x as *const AtomicUsize)).store(val, Ordering::Relaxed); // write
}
}
}
}
/// Sealed traits and implementations for `binary_heap`
pub mod binary_heap {
use crate::binary_heap::{Max, Min};
use core::cmp::Ordering;
use core::cmp::Ordering;
use crate::binary_heap::{Min, Max};
/// The binary heap kind: min-heap or max-heap
pub unsafe trait Kind {
#[doc(hidden)]
fn ordering() -> Ordering;
}
/// The binary heap kind: min-heap or max-heap
pub unsafe trait Kind {
#[doc(hidden)]
fn ordering() -> Ordering;
}
unsafe impl Kind for Min {
fn ordering() -> Ordering {
Ordering::Less
}
}
unsafe impl Kind for Min {
fn ordering() -> Ordering {
Ordering::Less
unsafe impl Kind for Max {
fn ordering() -> Ordering {
Ordering::Greater
}
}
}
unsafe impl Kind for Max {
fn ordering() -> Ordering {
Ordering::Greater
}
}
}

View File

@ -1,5 +1,8 @@
//! Fixed capacity Single Producer Single Consumer (SPSC) queue
//!
//! NOTE: This module is not available on targets that do *not* support atomic loads, e.g. RISC-V
//! cores w/o the A (Atomic) extension
//!
//! # Examples
//!
//! - `Queue` can be used as a plain queue