Removed SingleCore and MultiCore, relies on atomics now

This commit is contained in:
Emil Fresk 2021-04-01 20:15:13 +02:00
parent 12682bdfd8
commit 870a925229
7 changed files with 168 additions and 309 deletions

View File

@ -1,9 +1,13 @@
error[E0499]: cannot borrow `q` as mutable more than once at a time
--> $DIR/freeze.rs:7:5
|
6 | let (_p, mut _c) = q.split();
| - first mutable borrow occurs here
7 | q.enqueue(0).unwrap();
| ^ second mutable borrow occurs here
8 | _c.dequeue();
| -- first borrow later used here
error[E0107]: this struct takes 3 generic arguments but 4 generic arguments were supplied
--> $DIR/freeze.rs:4:16
|
4 | let mut q: Queue<u8, _, _, 4> = Queue::new();
| ^^^^^ --- help: remove this generic argument
| |
| expected 3 generic arguments
|
note: struct defined here, with 3 generic parameters: `T`, `U`, `N`
--> $DIR/mod.rs:151:12
|
151 | pub struct Queue<T, U, const N: usize>
| ^^^^^ - - -

View File

@ -1,53 +1,44 @@
error[E0277]: `*const ()` cannot be sent between threads safely
--> $DIR/not-send.rs:18:5
error[E0107]: this struct takes 3 generic arguments but 4 generic arguments were supplied
--> $DIR/not-send.rs:18:15
|
11 | fn is_send<T>()
| ------- required by a bound in this
12 | where
13 | T: Send,
| ---- required by this bound in `is_send`
...
18 | is_send::<Consumer<NotSend, _, _, 4>>();
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ `*const ()` cannot be sent between threads safely
| ^^^^^^^^ --- help: remove this generic argument
| |
| expected 3 generic arguments
|
= help: within `PhantomData<*const ()>`, the trait `Send` is not implemented for `*const ()`
= note: required because it appears within the type `PhantomData<*const ()>`
= note: required because of the requirements on the impl of `Send` for `Consumer<'_, PhantomData<*const ()>, _, _, 4_usize>`
note: struct defined here, with 3 generic parameters: `T`, `U`, `N`
--> $DIR/split.rs:26:12
|
26 | pub struct Consumer<'a, T, U, const N: usize>
| ^^^^^^^^ - - -
error[E0277]: `*const ()` cannot be sent between threads safely
--> $DIR/not-send.rs:19:5
error[E0107]: this struct takes 3 generic arguments but 4 generic arguments were supplied
--> $DIR/not-send.rs:19:15
|
11 | fn is_send<T>()
| ------- required by a bound in this
12 | where
13 | T: Send,
| ---- required by this bound in `is_send`
...
19 | is_send::<Producer<NotSend, _, _, 4>>();
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ `*const ()` cannot be sent between threads safely
| ^^^^^^^^ --- help: remove this generic argument
| |
| expected 3 generic arguments
|
= help: within `PhantomData<*const ()>`, the trait `Send` is not implemented for `*const ()`
= note: required because it appears within the type `PhantomData<*const ()>`
= note: required because of the requirements on the impl of `Send` for `Producer<'_, PhantomData<*const ()>, _, _, 4_usize>`
note: struct defined here, with 3 generic parameters: `T`, `U`, `N`
--> $DIR/split.rs:43:12
|
43 | pub struct Producer<'a, T, U, const N: usize>
| ^^^^^^^^ - - -
error[E0277]: `*const ()` cannot be sent between threads safely
--> $DIR/not-send.rs:20:5
|
11 | fn is_send<T>()
| ------- required by a bound in this
12 | where
13 | T: Send,
| ---- required by this bound in `is_send`
...
20 | is_send::<Queue<NotSend, _, _, 4>>();
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ `*const ()` cannot be sent between threads safely
|
= help: within `Queue<PhantomData<*const ()>, _, _, 4_usize>`, the trait `Send` is not implemented for `*const ()`
= note: required because it appears within the type `PhantomData<*const ()>`
= note: required because it appears within the type `[PhantomData<*const ()>; 4]`
= note: required because it appears within the type `ManuallyDrop<[PhantomData<*const ()>; 4]>`
= note: required because it appears within the type `MaybeUninit<[PhantomData<*const ()>; 4]>`
= note: required because it appears within the type `Queue<PhantomData<*const ()>, _, _, 4_usize>`
error[E0107]: this struct takes 3 generic arguments but 4 generic arguments were supplied
--> $DIR/not-send.rs:20:15
|
20 | is_send::<Queue<NotSend, _, _, 4>>();
| ^^^^^ --- help: remove this generic argument
| |
| expected 3 generic arguments
|
note: struct defined here, with 3 generic parameters: `T`, `U`, `N`
--> $DIR/mod.rs:151:12
|
151 | pub struct Queue<T, U, const N: usize>
| ^^^^^ - - -
error[E0277]: `*const ()` cannot be sent between threads safely
--> $DIR/not-send.rs:21:5

View File

@ -1,27 +1,7 @@
/// Sealed traits and implementations for `spsc`
pub mod spsc {
#[cfg(has_atomics)]
use crate::spsc::{MultiCore, SingleCore};
#[cfg(has_atomics)]
use core::sync::atomic::{self, AtomicU16, AtomicU8, AtomicUsize, Ordering};
pub unsafe trait XCore {
fn is_multi_core() -> bool;
}
#[cfg(has_atomics)]
unsafe impl XCore for SingleCore {
fn is_multi_core() -> bool {
false
}
}
#[cfg(has_atomics)]
unsafe impl XCore for MultiCore {
fn is_multi_core() -> bool {
true
}
}
use core::sync::atomic::{AtomicU16, AtomicU8, AtomicUsize, Ordering};
pub unsafe trait Uxx: Into<usize> + Send {
#[doc(hidden)]
@ -32,9 +12,7 @@ pub mod spsc {
#[cfg(has_atomics)]
#[doc(hidden)]
unsafe fn load_acquire<C>(x: *const Self) -> Self
where
C: XCore;
unsafe fn load_acquire(x: *const Self) -> Self;
#[cfg(has_atomics)]
#[doc(hidden)]
@ -42,9 +20,7 @@ pub mod spsc {
#[cfg(has_atomics)]
#[doc(hidden)]
unsafe fn store_release<C>(x: *const Self, val: Self)
where
C: XCore;
unsafe fn store_release(x: *const Self, val: Self);
}
unsafe impl Uxx for u8 {
@ -62,17 +38,11 @@ pub mod spsc {
}
#[cfg(has_atomics)]
unsafe fn load_acquire<C>(x: *const Self) -> Self
where
C: XCore,
{
if C::is_multi_core() {
(*(x as *const AtomicU8)).load(Ordering::Acquire)
} else {
let y = (*(x as *const AtomicU8)).load(Ordering::Relaxed); // read
atomic::compiler_fence(Ordering::Acquire); // ▼
y
}
unsafe fn load_acquire(x: *const Self) -> Self {
(*(x as *const AtomicU8)).load(Ordering::Acquire)
// let y = (*(x as *const AtomicU8)).load(Ordering::Relaxed); // read
// atomic::compiler_fence(Ordering::Acquire); // ▼
// y
}
#[cfg(has_atomics)]
@ -81,16 +51,10 @@ pub mod spsc {
}
#[cfg(has_atomics)]
unsafe fn store_release<C>(x: *const Self, val: Self)
where
C: XCore,
{
if C::is_multi_core() {
(*(x as *const AtomicU8)).store(val, Ordering::Release)
} else {
atomic::compiler_fence(Ordering::Release); // ▲
(*(x as *const AtomicU8)).store(val, Ordering::Relaxed) // write
}
unsafe fn store_release(x: *const Self, val: Self) {
(*(x as *const AtomicU8)).store(val, Ordering::Release)
// atomic::compiler_fence(Ordering::Release); // ▲
// (*(x as *const AtomicU8)).store(val, Ordering::Relaxed) // write
}
}
@ -109,17 +73,11 @@ pub mod spsc {
}
#[cfg(has_atomics)]
unsafe fn load_acquire<C>(x: *const Self) -> Self
where
C: XCore,
{
if C::is_multi_core() {
(*(x as *const AtomicU16)).load(Ordering::Acquire)
} else {
let y = (*(x as *const AtomicU16)).load(Ordering::Relaxed); // read
atomic::compiler_fence(Ordering::Acquire); // ▼
y
}
unsafe fn load_acquire(x: *const Self) -> Self {
(*(x as *const AtomicU16)).load(Ordering::Acquire)
// let y = (*(x as *const AtomicU16)).load(Ordering::Relaxed); // read
// atomic::compiler_fence(Ordering::Acquire); // ▼
// y
}
#[cfg(has_atomics)]
@ -128,16 +86,10 @@ pub mod spsc {
}
#[cfg(has_atomics)]
unsafe fn store_release<C>(x: *const Self, val: Self)
where
C: XCore,
{
if C::is_multi_core() {
(*(x as *const AtomicU16)).store(val, Ordering::Release)
} else {
atomic::compiler_fence(Ordering::Release); // ▲
(*(x as *const AtomicU16)).store(val, Ordering::Relaxed) // write
}
unsafe fn store_release(x: *const Self, val: Self) {
(*(x as *const AtomicU16)).store(val, Ordering::Release)
// atomic::compiler_fence(Ordering::Release); // ▲
// (*(x as *const AtomicU16)).store(val, Ordering::Relaxed) // write
}
}
@ -151,17 +103,11 @@ pub mod spsc {
}
#[cfg(has_atomics)]
unsafe fn load_acquire<C>(x: *const Self) -> Self
where
C: XCore,
{
if C::is_multi_core() {
(*(x as *const AtomicUsize)).load(Ordering::Acquire)
} else {
let y = (*(x as *const AtomicUsize)).load(Ordering::Relaxed); // read
atomic::compiler_fence(Ordering::Acquire); // ▼
y
}
unsafe fn load_acquire(x: *const Self) -> Self {
(*(x as *const AtomicUsize)).load(Ordering::Acquire)
// let y = (*(x as *const AtomicUsize)).load(Ordering::Relaxed); // read
// atomic::compiler_fence(Ordering::Acquire); // ▼
// y
}
#[cfg(has_atomics)]
@ -170,16 +116,10 @@ pub mod spsc {
}
#[cfg(has_atomics)]
unsafe fn store_release<C>(x: *const Self, val: Self)
where
C: XCore,
{
if C::is_multi_core() {
(*(x as *const AtomicUsize)).store(val, Ordering::Release)
} else {
atomic::compiler_fence(Ordering::Release); // ▲
(*(x as *const AtomicUsize)).store(val, Ordering::Relaxed); // write
}
unsafe fn store_release(x: *const Self, val: Self) {
(*(x as *const AtomicUsize)).store(val, Ordering::Release)
// atomic::compiler_fence(Ordering::Release); // ▲
// (*(x as *const AtomicUsize)).store(val, Ordering::Relaxed); // write
}
}
}

View File

@ -10,7 +10,7 @@
//! ```
//! use heapless::spsc::Queue;
//!
//! let mut rb: Queue<u8, _, _, 4> = Queue::new();
//! let mut rb: Queue<u8, _, 4> = Queue::new();
//!
//! assert!(rb.enqueue(0).is_ok());
//! assert!(rb.enqueue(1).is_ok());
@ -24,11 +24,11 @@
//! - `Queue` can be `split` and then be used in Single Producer Single Consumer mode
//!
//! ```
//! use heapless::spsc::{Queue, MultiCore};
//! use heapless::spsc::{Queue};
//!
//! // Notice, type signature needs to be explicit for now.
//! // (min_const_eval, does not allow for default type assignments)
//! static mut Q: Queue<Event, usize, MultiCore, 4> = Queue::new();
//! static mut Q: Queue<Event, usize, 4> = Queue::new();
//!
//! enum Event { A, B }
//!
@ -83,7 +83,7 @@
//! - The numbers reported correspond to the successful path (i.e. `Some` is returned by `dequeue`
//! and `Ok` is returned by `enqueue`).
use core::{cell::UnsafeCell, fmt, hash, marker::PhantomData, mem::MaybeUninit, ptr};
use core::{cell::UnsafeCell, fmt, hash, mem::MaybeUninit, ptr};
use hash32;
@ -92,32 +92,23 @@ pub use split::{Consumer, Producer};
mod split;
/// Multi core synchronization - a memory barrier is used for synchronization
pub struct MultiCore;
/// Single core synchronization - no memory barrier synchronization, just a compiler fence
pub struct SingleCore;
// Atomic{U8,U16, Usize} with no CAS operations that works on targets that have "no atomic support"
// Atomic{U8, U16, Usize} with no CAS operations that works on targets that have "no atomic support"
// according to their specification
pub(crate) struct Atomic<U, C> {
pub(crate) struct Atomic<U> {
v: UnsafeCell<U>,
c: PhantomData<C>,
}
impl<U, C> Atomic<U, C> {
impl<U> Atomic<U> {
pub(crate) const fn new(v: U) -> Self {
Atomic {
v: UnsafeCell::new(v),
c: PhantomData,
}
}
}
impl<U, C> Atomic<U, C>
impl<U> Atomic<U>
where
U: sealed::Uxx,
C: sealed::XCore,
{
fn get(&self) -> &U {
unsafe { &*self.v.get() }
@ -128,7 +119,7 @@ where
}
fn load_acquire(&self) -> U {
unsafe { U::load_acquire::<C>(self.v.get()) }
unsafe { U::load_acquire(self.v.get()) }
}
fn load_relaxed(&self) -> U {
@ -136,7 +127,7 @@ where
}
fn store_release(&self, val: U) {
unsafe { U::store_release::<C>(self.v.get(), val) }
unsafe { U::store_release(self.v.get(), val) }
}
}
@ -153,33 +144,26 @@ where
/// [`u8`]: struct.Queue.html#method.u8
/// [`u16`]: struct.Queue.html#method.u16
///
/// *IMPORTANT*: `spsc::Queue<_, _, u8>` has a maximum capacity of 255 elements; `spsc::Queue<_, _,
/// u16>` has a maximum capacity of 65535 elements.
///
/// `spsc::Queue` also comes in a single core variant. This variant can be created using the
/// following constructors: `u8_sc`, `u16_sc`, `usize_sc` and `new_sc`. This variant is `unsafe` to
/// create because the programmer must make sure that the queue's consumer and producer endpoints
/// (if split) are kept on a single core for their entire lifetime.
/// *IMPORTANT*: `spsc::Queue<_, u8, N>` has a maximum capacity of 255 elements; `spsc::Queue<_,
/// u16, N>` has a maximum capacity of 65535 elements.
#[cfg(has_atomics)]
pub struct Queue<T, U, C, const N: usize>
pub struct Queue<T, U, const N: usize>
where
U: sealed::Uxx,
C: sealed::XCore,
{
// this is from where we dequeue items
pub(crate) head: Atomic<U, C>,
pub(crate) head: Atomic<U>,
// this is where we enqueue new items
pub(crate) tail: Atomic<U, C>,
pub(crate) tail: Atomic<U>,
pub(crate) buffer: MaybeUninit<[T; N]>,
}
impl<T, U, C, const N: usize> Queue<T, U, C, N>
impl<T, U, const N: usize> Queue<T, U, N>
where
U: sealed::Uxx,
C: sealed::XCore,
{
/// Returns the maximum number of elements the queue can hold
pub fn capacity(&self) -> U {
@ -192,7 +176,7 @@ where
}
/// Iterates from the front of the queue to the back
pub fn iter(&self) -> Iter<'_, T, U, C, N> {
pub fn iter(&self) -> Iter<'_, T, U, N> {
Iter {
rb: self,
index: 0,
@ -201,7 +185,7 @@ where
}
/// Returns an iterator that allows modifying each value.
pub fn iter_mut(&mut self) -> IterMut<'_, T, U, C, N> {
pub fn iter_mut(&mut self) -> IterMut<'_, T, U, N> {
let len = self.len_usize();
IterMut {
rb: self,
@ -218,10 +202,9 @@ where
}
}
impl<T, U, C, const N: usize> Drop for Queue<T, U, C, N>
impl<T, U, const N: usize> Drop for Queue<T, U, N>
where
U: sealed::Uxx,
C: sealed::XCore,
{
fn drop(&mut self) {
for item in self {
@ -232,22 +215,20 @@ where
}
}
impl<T, U, C, const N: usize> fmt::Debug for Queue<T, U, C, N>
impl<T, U, const N: usize> fmt::Debug for Queue<T, U, N>
where
T: fmt::Debug,
U: sealed::Uxx,
C: sealed::XCore,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_list().entries(self.iter()).finish()
}
}
impl<T, U, C, const N: usize> hash::Hash for Queue<T, U, C, N>
impl<T, U, const N: usize> hash::Hash for Queue<T, U, N>
where
T: hash::Hash,
U: sealed::Uxx,
C: sealed::XCore,
{
fn hash<H: hash::Hasher>(&self, state: &mut H) {
// iterate over self in order
@ -257,11 +238,10 @@ where
}
}
impl<T, U, C, const N: usize> hash32::Hash for Queue<T, U, C, N>
impl<T, U, const N: usize> hash32::Hash for Queue<T, U, N>
where
T: hash32::Hash,
U: sealed::Uxx,
C: sealed::XCore,
{
fn hash<H: hash32::Hasher>(&self, state: &mut H) {
// iterate over self in order
@ -271,26 +251,24 @@ where
}
}
impl<'a, T, U, C, const N: usize> IntoIterator for &'a Queue<T, U, C, N>
impl<'a, T, U, const N: usize> IntoIterator for &'a Queue<T, U, N>
where
U: sealed::Uxx,
C: sealed::XCore,
{
type Item = &'a T;
type IntoIter = Iter<'a, T, U, C, N>;
type IntoIter = Iter<'a, T, U, N>;
fn into_iter(self) -> Self::IntoIter {
self.iter()
}
}
impl<'a, T, U, C, const N: usize> IntoIterator for &'a mut Queue<T, U, C, N>
impl<'a, T, U, const N: usize> IntoIterator for &'a mut Queue<T, U, N>
where
U: sealed::Uxx,
C: sealed::XCore,
{
type Item = &'a mut T;
type IntoIter = IterMut<'a, T, U, C, N>;
type IntoIter = IterMut<'a, T, U, N>;
fn into_iter(self) -> Self::IntoIter {
self.iter_mut()
@ -298,8 +276,8 @@ where
}
macro_rules! impl_ {
($uxx:ident, $uxx_sc:ident) => {
impl<T, const N: usize> Queue<T, $uxx, MultiCore, N> {
($uxx:ident) => {
impl<T, const N: usize> Queue<T, $uxx, N> {
/// Creates an empty queue with a fixed capacity of `N`
pub const fn $uxx() -> Self {
Self {
@ -310,21 +288,7 @@ macro_rules! impl_ {
}
}
impl<T, const N: usize> Queue<T, $uxx, SingleCore, N> {
/// Creates an empty queue with a fixed capacity of `N` (single core variant)
pub const unsafe fn $uxx_sc() -> Self {
Self {
buffer: MaybeUninit::uninit(),
head: Atomic::new(0),
tail: Atomic::new(0),
}
}
}
impl<T, C, const N: usize> Queue<T, $uxx, C, N>
where
C: sealed::XCore,
{
impl<T, const N: usize> Queue<T, $uxx, N> {
/// Returns a reference to the item in the front of the queue without dequeuing, or
/// `None` if the queue is empty.
///
@ -332,7 +296,7 @@ macro_rules! impl_ {
/// ```
/// use heapless::spsc::Queue;
///
/// let mut queue: Queue<u8, _, _, 235> = Queue::u8();
/// let mut queue: Queue<u8, _, 235> = Queue::u8();
/// let (mut producer, mut consumer) = queue.split();
/// assert_eq!(None, consumer.peek());
/// producer.enqueue(1);
@ -419,13 +383,12 @@ macro_rules! impl_ {
}
}
impl<T, C, const N: usize> Clone for Queue<T, $uxx, C, N>
impl<T, const N: usize> Clone for Queue<T, $uxx, N>
where
T: Clone,
C: sealed::XCore,
{
fn clone(&self) -> Self {
let mut new: Queue<T, $uxx, C, N> = Queue {
let mut new: Queue<T, $uxx, N> = Queue {
buffer: MaybeUninit::uninit(),
head: Atomic::new(0),
tail: Atomic::new(0),
@ -444,62 +407,49 @@ macro_rules! impl_ {
};
}
impl<T, const N: usize> Queue<T, usize, MultiCore, N> {
impl<T, const N: usize> Queue<T, usize, N> {
/// Alias for [`spsc::Queue::usize`](struct.Queue.html#method.usize)
pub const fn new() -> Self {
Queue::usize()
}
}
impl<T, const N: usize> Queue<T, usize, SingleCore, N> {
/// Alias for [`spsc::Queue::usize_sc`](struct.Queue.html#method.usize_sc)
pub unsafe fn new_sc() -> Self {
Queue::usize_sc()
}
}
impl_!(u8);
impl_!(u16);
impl_!(usize);
impl_!(u8, u8_sc);
impl_!(u16, u16_sc);
impl_!(usize, usize_sc);
impl<T, U, C, U2, C2, const N: usize, const N2: usize> PartialEq<Queue<T, U2, C2, N2>>
for Queue<T, U, C, N>
impl<T, U, U2, const N: usize, const N2: usize> PartialEq<Queue<T, U2, N2>> for Queue<T, U, N>
where
T: PartialEq,
U: sealed::Uxx,
C: sealed::XCore,
U2: sealed::Uxx,
C2: sealed::XCore,
{
fn eq(&self, other: &Queue<T, U2, C2, N2>) -> bool {
fn eq(&self, other: &Queue<T, U2, N2>) -> bool {
self.len_usize() == other.len_usize()
&& self.iter().zip(other.iter()).all(|(v1, v2)| v1 == v2)
}
}
impl<T, U, C, const N: usize> Eq for Queue<T, U, C, N>
impl<T, U, const N: usize> Eq for Queue<T, U, N>
where
T: Eq,
U: sealed::Uxx,
C: sealed::XCore,
{
}
/// An iterator over the items of a queue
pub struct Iter<'a, T, U, C, const N: usize>
pub struct Iter<'a, T, U, const N: usize>
where
U: sealed::Uxx,
C: sealed::XCore,
{
rb: &'a Queue<T, U, C, N>,
rb: &'a Queue<T, U, N>,
index: usize,
len: usize,
}
impl<'a, T, U, C, const N: usize> Clone for Iter<'a, T, U, C, N>
impl<'a, T, U, const N: usize> Clone for Iter<'a, T, U, N>
where
U: sealed::Uxx,
C: sealed::XCore,
{
fn clone(&self) -> Self {
Self {
@ -511,22 +461,20 @@ where
}
/// A mutable iterator over the items of a queue
pub struct IterMut<'a, T, U, C, const N: usize>
pub struct IterMut<'a, T, U, const N: usize>
where
U: sealed::Uxx,
C: sealed::XCore,
{
rb: &'a mut Queue<T, U, C, N>,
rb: &'a mut Queue<T, U, N>,
index: usize,
len: usize,
}
macro_rules! iterator {
(struct $name:ident -> $elem:ty, $ptr:ty, $asptr:ident, $mkref:ident) => {
impl<'a, T, U, C, const N: usize> Iterator for $name<'a, T, U, C, N>
impl<'a, T, U, const N: usize> Iterator for $name<'a, T, U, N>
where
U: sealed::Uxx,
C: sealed::XCore,
{
type Item = $elem;
@ -545,10 +493,9 @@ macro_rules! iterator {
}
}
impl<'a, T, U, C, const N: usize> DoubleEndedIterator for $name<'a, T, U, C, N>
impl<'a, T, U, const N: usize> DoubleEndedIterator for $name<'a, T, U, N>
where
U: sealed::Uxx,
C: sealed::XCore,
{
fn next_back(&mut self) -> Option<$elem> {
if self.index < self.len {
@ -587,21 +534,16 @@ iterator!(struct IterMut -> &'a mut T, *mut T, as_mut_ptr, make_ref_mut);
mod tests {
use hash32::Hasher;
use crate::spsc::{MultiCore, Queue, SingleCore};
#[test]
fn static_usize_sc() {
static mut _Q: Queue<i32, usize, SingleCore, 4> = unsafe { Queue::usize_sc() };
}
use crate::spsc::Queue;
#[test]
fn static_usize() {
static mut _Q: Queue<i32, usize, MultiCore, 4> = Queue::usize();
static mut _Q: Queue<i32, usize, 4> = Queue::usize();
}
#[test]
fn static_new() {
static mut _Q: Queue<i32, usize, MultiCore, 4> = Queue::new();
static mut _Q: Queue<i32, usize, 4> = Queue::new();
}
#[test]
@ -627,7 +569,7 @@ mod tests {
static mut COUNT: i32 = 0;
{
let mut v: Queue<Droppable, usize, SingleCore, 4> = unsafe { Queue::usize_sc() };
let mut v: Queue<Droppable, usize, 4> = Queue::usize();
v.enqueue(Droppable::new()).ok().unwrap();
v.enqueue(Droppable::new()).ok().unwrap();
v.dequeue().unwrap();
@ -636,7 +578,7 @@ mod tests {
assert_eq!(unsafe { COUNT }, 0);
{
let mut v: Queue<Droppable, usize, MultiCore, 4> = Queue::usize();
let mut v: Queue<Droppable, usize, 4> = Queue::usize();
v.enqueue(Droppable::new()).ok().unwrap();
v.enqueue(Droppable::new()).ok().unwrap();
}
@ -646,7 +588,7 @@ mod tests {
#[test]
fn full() {
let mut rb: Queue<i32, u8, MultiCore, 4> = Queue::u8();
let mut rb: Queue<i32, u8, 4> = Queue::u8();
rb.enqueue(0).unwrap();
rb.enqueue(1).unwrap();
@ -658,7 +600,7 @@ mod tests {
#[test]
fn iter() {
let mut rb: Queue<i32, u16, MultiCore, 4> = Queue::u16();
let mut rb: Queue<i32, u16, 4> = Queue::u16();
rb.enqueue(0).unwrap();
rb.enqueue(1).unwrap();
@ -674,7 +616,7 @@ mod tests {
#[test]
fn iter_double_ended() {
let mut rb: Queue<i32, u8, MultiCore, 4> = Queue::u8();
let mut rb: Queue<i32, u8, 4> = Queue::u8();
rb.enqueue(0).unwrap();
rb.enqueue(1).unwrap();
@ -691,7 +633,7 @@ mod tests {
#[test]
fn iter_overflow() {
let mut rb: Queue<i32, u8, MultiCore, 4> = Queue::u8();
let mut rb: Queue<i32, u8, 4> = Queue::u8();
rb.enqueue(0).unwrap();
for _ in 0..300 {
@ -705,7 +647,7 @@ mod tests {
#[test]
fn iter_mut() {
let mut rb: Queue<i32, u8, MultiCore, 4> = Queue::u8();
let mut rb: Queue<i32, u8, 4> = Queue::u8();
rb.enqueue(0).unwrap();
rb.enqueue(1).unwrap();
@ -721,7 +663,7 @@ mod tests {
#[test]
fn iter_mut_double_ended() {
let mut rb: Queue<i32, u8, MultiCore, 4> = Queue::u8();
let mut rb: Queue<i32, u8, 4> = Queue::u8();
rb.enqueue(0).unwrap();
rb.enqueue(1).unwrap();
@ -738,7 +680,7 @@ mod tests {
#[test]
fn sanity() {
let mut rb: Queue<i32, u8, MultiCore, 4> = Queue::u8();
let mut rb: Queue<i32, u8, 4> = Queue::u8();
assert_eq!(rb.dequeue(), None);
rb.enqueue(0).unwrap();
assert_eq!(rb.dequeue(), Some(0));
@ -748,7 +690,7 @@ mod tests {
#[test]
#[cfg(feature = "smaller-atomics")]
fn u8() {
let mut rb: Queue<u8, u8, MultiCore, 256> = Queue::u8();
let mut rb: Queue<u8, u8, 256> = Queue::u8();
for _ in 0..255 {
rb.enqueue(0).unwrap();
@ -759,7 +701,7 @@ mod tests {
#[test]
fn wrap_around() {
let mut rb: Queue<i32, u8, MultiCore, 3> = Queue::u8();
let mut rb: Queue<i32, u8, 3> = Queue::u8();
rb.enqueue(0).unwrap();
rb.enqueue(1).unwrap();
@ -775,7 +717,7 @@ mod tests {
#[test]
fn ready_flag() {
let mut rb: Queue<i32, u8, MultiCore, 2> = Queue::u8();
let mut rb: Queue<i32, u8, 2> = Queue::u8();
let (mut p, mut c) = rb.split();
assert_eq!(c.ready(), false);
assert_eq!(p.ready(), true);
@ -803,7 +745,7 @@ mod tests {
#[test]
fn clone() {
let mut rb1: Queue<i32, u8, MultiCore, 4> = Queue::u8();
let mut rb1: Queue<i32, u8, 4> = Queue::u8();
rb1.enqueue(0).unwrap();
rb1.enqueue(0).unwrap();
rb1.dequeue().unwrap();
@ -818,12 +760,12 @@ mod tests {
fn eq() {
// generate two queues with same content
// but different buffer alignment
let mut rb1: Queue<i32, u8, MultiCore, 4> = Queue::u8();
let mut rb1: Queue<i32, u8, 4> = Queue::u8();
rb1.enqueue(0).unwrap();
rb1.enqueue(0).unwrap();
rb1.dequeue().unwrap();
rb1.enqueue(0).unwrap();
let mut rb2: Queue<i32, u8, MultiCore, 4> = Queue::u8();
let mut rb2: Queue<i32, u8, 4> = Queue::u8();
rb2.enqueue(0).unwrap();
rb2.enqueue(0).unwrap();
assert!(rb1 == rb2);
@ -844,7 +786,7 @@ mod tests {
// generate two queues with same content
// but different buffer alignment
let rb1 = {
let mut rb1: Queue<i32, u8, MultiCore, 4> = Queue::u8();
let mut rb1: Queue<i32, u8, 4> = Queue::u8();
rb1.enqueue(0).unwrap();
rb1.enqueue(0).unwrap();
rb1.dequeue().unwrap();
@ -852,7 +794,7 @@ mod tests {
rb1
};
let rb2 = {
let mut rb2: Queue<i32, u8, MultiCore, 4> = Queue::u8();
let mut rb2: Queue<i32, u8, 4> = Queue::u8();
rb2.enqueue(0).unwrap();
rb2.enqueue(0).unwrap();
rb2

View File

@ -1,18 +1,13 @@
use core::{marker::PhantomData, ptr::NonNull};
use crate::{
sealed::spsc as sealed,
spsc::Queue,
// spsc::{MultiCore, Queue}, // we cannot currently default to MultiCore
};
use crate::{sealed::spsc as sealed, spsc::Queue};
impl<T, U, C, const N: usize> Queue<T, U, C, N>
impl<T, U, const N: usize> Queue<T, U, N>
where
U: sealed::Uxx,
C: sealed::XCore,
{
/// Splits a statically allocated queue into producer and consumer end points
pub fn split<'rb>(&'rb mut self) -> (Producer<'rb, T, U, C, N>, Consumer<'rb, T, U, C, N>) {
pub fn split<'rb>(&'rb mut self) -> (Producer<'rb, T, U, N>, Consumer<'rb, T, U, N>) {
(
Producer {
rb: unsafe { NonNull::new_unchecked(self) },
@ -28,48 +23,41 @@ where
/// A queue "consumer"; it can dequeue items from the queue
// NOTE the consumer semantically owns the `head` pointer of the queue
pub struct Consumer<'a, T, U, C, const N: usize>
pub struct Consumer<'a, T, U, const N: usize>
where
U: sealed::Uxx,
C: sealed::XCore,
{
rb: NonNull<Queue<T, U, C, N>>,
rb: NonNull<Queue<T, U, N>>,
_marker: PhantomData<&'a ()>,
}
unsafe impl<'a, T, U, C, const N: usize> Send for Consumer<'a, T, U, C, N>
unsafe impl<'a, T, U, const N: usize> Send for Consumer<'a, T, U, N>
where
T: Send,
U: sealed::Uxx,
C: sealed::XCore,
{
}
/// A queue "producer"; it can enqueue items into the queue
// NOTE the producer semantically owns the `tail` pointer of the queue
pub struct Producer<'a, T, U, C, const N: usize>
pub struct Producer<'a, T, U, const N: usize>
where
U: sealed::Uxx,
C: sealed::XCore,
{
rb: NonNull<Queue<T, U, C, N>>,
rb: NonNull<Queue<T, U, N>>,
_marker: PhantomData<&'a ()>,
}
unsafe impl<'a, T, U, C, const N: usize> Send for Producer<'a, T, U, C, N>
unsafe impl<'a, T, U, const N: usize> Send for Producer<'a, T, U, N>
where
T: Send,
U: sealed::Uxx,
C: sealed::XCore,
{
}
macro_rules! impl_ {
($uxx:ident) => {
impl<'a, T, C, const N: usize> Consumer<'a, T, $uxx, C, N>
where
C: sealed::XCore,
{
impl<'a, T, const N: usize> Consumer<'a, T, $uxx, N> {
/// Returns if there are any items to dequeue. When this returns true, at least the
/// first subsequent dequeue will succeed.
pub fn ready(&self) -> bool {
@ -84,7 +72,7 @@ macro_rules! impl_ {
/// ```
/// use heapless::spsc::Queue;
///
/// let mut queue: Queue<u8, _, _, 235> = Queue::u8();
/// let mut queue: Queue<u8, _, 235> = Queue::u8();
/// let (mut producer, mut consumer) = queue.split();
/// assert_eq!(None, consumer.peek());
/// producer.enqueue(1);
@ -165,10 +153,7 @@ macro_rules! impl_ {
}
}
impl<'a, T, C, const N: usize> Producer<'a, T, $uxx, C, N>
where
C: sealed::XCore,
{
impl<'a, T, const N: usize> Producer<'a, T, $uxx, N> {
/// Returns if there is any space to enqueue a new item. When this returns true, at
/// least the first subsequent enqueue will succeed.
pub fn ready(&self) -> bool {
@ -259,11 +244,11 @@ impl_!(usize);
#[cfg(test)]
mod tests {
use crate::spsc::{MultiCore, Queue};
use crate::spsc::Queue;
#[test]
fn sanity() {
let mut rb: Queue<i32, u8, MultiCore, 2> = Queue::u8();
let mut rb: Queue<i32, u8, 2> = Queue::u8();
let (mut p, mut c) = rb.split();

View File

@ -1,7 +1,7 @@
//! Collections of `Send`-able things are `Send`
use heapless::{
spsc::{Consumer, MultiCore, Producer, Queue},
spsc::{Consumer, Producer, Queue},
HistoryBuffer, Vec,
};
@ -17,9 +17,9 @@ fn send() {
{
}
is_send::<Consumer<IsSend, usize, MultiCore, 4>>();
is_send::<Producer<IsSend, usize, MultiCore, 4>>();
is_send::<Queue<IsSend, usize, MultiCore, 4>>();
is_send::<Consumer<IsSend, usize, 4>>();
is_send::<Producer<IsSend, usize, 4>>();
is_send::<Queue<IsSend, usize, 4>>();
is_send::<Vec<IsSend, 4>>();
is_send::<HistoryBuffer<IsSend, 4>>();
}

View File

@ -4,15 +4,12 @@
use std::{sync::mpsc, thread};
use heapless::{
mpmc::Q64,
spsc::{self, MultiCore},
};
use heapless::{mpmc::Q64, spsc};
use scoped_threadpool::Pool;
#[test]
fn once() {
static mut RB: spsc::Queue<i32, usize, MultiCore, 4> = spsc::Queue::new();
static mut RB: spsc::Queue<i32, usize, 4> = spsc::Queue::new();
let rb = unsafe { &mut RB };
@ -33,7 +30,7 @@ fn once() {
#[test]
fn twice() {
static mut RB: spsc::Queue<i32, usize, MultiCore, 4> = spsc::Queue::new();
static mut RB: spsc::Queue<i32, usize, 4> = spsc::Queue::new();
let rb = unsafe { &mut RB };
@ -55,7 +52,7 @@ fn twice() {
#[test]
fn scoped() {
let mut rb: spsc::Queue<i32, usize, MultiCore, 4> = spsc::Queue::new();
let mut rb: spsc::Queue<i32, usize, 4> = spsc::Queue::new();
rb.enqueue(0).unwrap();
@ -80,7 +77,7 @@ fn scoped() {
fn contention() {
const N: usize = 1024;
let mut rb: spsc::Queue<u8, usize, MultiCore, 4> = spsc::Queue::new();
let mut rb: spsc::Queue<u8, usize, 4> = spsc::Queue::new();
{
let (mut p, mut c) = rb.split();
@ -167,7 +164,7 @@ fn mpmc_contention() {
fn unchecked() {
const N: usize = 1024;
let mut rb: spsc::Queue<u8, usize, MultiCore, N> = spsc::Queue::new();
let mut rb: spsc::Queue<u8, usize, N> = spsc::Queue::new();
for _ in 0..N / 2 {
rb.enqueue(1).unwrap();
@ -203,7 +200,7 @@ fn unchecked() {
#[test]
fn len_properly_wraps() {
const N: usize = 3;
let mut rb: spsc::Queue<u8, usize, MultiCore, N> = spsc::Queue::new();
let mut rb: spsc::Queue<u8, usize, N> = spsc::Queue::new();
rb.enqueue(1).unwrap();
assert_eq!(rb.len(), 1);
@ -220,7 +217,7 @@ fn len_properly_wraps() {
#[test]
fn iterator_properly_wraps() {
const N: usize = 3;
let mut rb: spsc::Queue<u8, usize, MultiCore, N> = spsc::Queue::new();
let mut rb: spsc::Queue<u8, usize, N> = spsc::Queue::new();
rb.enqueue(1).unwrap();
rb.dequeue();