Mirror of https://github.com/rust-embedded/heapless.git (synced 2025-09-28)
Merge #59: v0.4.0
r=japaric a=japaric
Co-authored-by: Jorge Aparicio <jorge@japaric.io>
Commit f2cad2fc55
CHANGELOG.md (19 lines changed)

@@ -7,6 +7,22 @@ and this project adheres to [Semantic Versioning](http://semver.org/).
 
 ## [Unreleased]
 
+## [v0.4.0] - 2018-10-19
+
+### Changed
+
+- [breaking-change] All Cargo features are disabled by default. This crate now
+  compiles on stable by default.
+
+- [breaking-change] RingBuffer has been renamed to spsc::Queue. The ring_buffer
+  module has been renamed to spsc.
+
+- [breaking-change] The bounds on spsc::Queue have changed.
+
+### Removed
+
+- [breaking-change] The sealed `Uxx` trait has been removed from the public API.
+
 ## [v0.3.7] - 2018-08-19
 
 ### Added
@@ -152,7 +168,8 @@ and this project adheres to [Semantic Versioning](http://semver.org/).
 
 - Initial release
 
-[Unreleased]: https://github.com/japaric/heapless/compare/v0.3.7...HEAD
+[Unreleased]: https://github.com/japaric/heapless/compare/v0.4.0...HEAD
+[v0.4.0]: https://github.com/japaric/heapless/compare/v0.3.7...v0.4.0
 [v0.3.7]: https://github.com/japaric/heapless/compare/v0.3.6...v0.3.7
 [v0.3.6]: https://github.com/japaric/heapless/compare/v0.3.5...v0.3.6
 [v0.3.5]: https://github.com/japaric/heapless/compare/v0.3.4...v0.3.5
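For downstream code the rename is mostly mechanical. A minimal migration sketch (assuming heapless 0.4 with default features, i.e. stable Rust; `U4` comes from `heapless::consts`):

```rust
// before (heapless 0.3.x):
//   use heapless::RingBuffer;
//   let mut rb: RingBuffer<u8, U4> = RingBuffer::new();

// after (heapless 0.4.0):
use heapless::consts::*;
use heapless::spsc::Queue;

fn main() {
    let mut q: Queue<u8, U4> = Queue::new();
    q.enqueue(0).unwrap();
    assert_eq!(q.dequeue(), Some(0));
}
```

Note the changed bounds: `N` is now the capacity itself (`N: ArrayLength<T>`), so `Queue<u8, U4>` holds 4 elements in exactly 4 slots, where the 0.3.x `RingBuffer<u8, U4>` held 4 elements in an `N + 1` slot buffer (see the `Sum<N, U1>` bounds removed further down).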
Cargo.toml

@@ -16,10 +16,9 @@ keywords = [
 license = "MIT OR Apache-2.0"
 name = "heapless"
 repository = "https://github.com/japaric/heapless"
-version = "0.3.7"
+version = "0.4.0"
 
 [features]
-default = ["const-fn", "smaller-atomics"]
 const-fn = []
 smaller-atomics = []
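With both features now off by default, `Queue::new()` and friends are plain `fn`s on stable; the nightly-only `const-fn` feature is what makes them `const fn`, enabling `static` initialization. A sketch of the practical difference (assuming the dependent crate opts into `const-fn` on nightly):

```rust
use heapless::consts::*;
use heapless::spsc::Queue;

// with `features = ["const-fn"]` (nightly): initialize statics at compile time
// static mut Q: Queue<u8, U16> = Queue::new();

// with default features (stable): fall back to runtime initialization
static mut Q: Option<Queue<u8, U16>> = None;

fn main() {
    unsafe { Q = Some(Queue::new()) };
    let q = unsafe { Q.as_mut().unwrap() };
    q.enqueue(1).unwrap();
}
```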
ci/script.sh (16 lines changed)

@@ -1,27 +1,27 @@
 set -euxo pipefail
 
 main() {
-    cargo check --target $TARGET --no-default-features
+    cargo check --target $TARGET
 
     if [ $TRAVIS_RUST_VERSION = nightly ]; then
-        cargo check --target $TARGET
+        cargo check --target $TARGET --features 'const-fn smaller-atomics'
     fi
 
     if [ $TARGET = x86_64-unknown-linux-gnu ]; then
-        cargo test --target $TARGET --no-default-features
-        cargo test --target $TARGET --release --no-default-features
+        cargo test --target $TARGET
+        cargo test --target $TARGET --release
 
        if [ $TRAVIS_RUST_VERSION = nightly ]; then
-            cargo test --target $TARGET
-            cargo test --target $TARGET --release
+            cargo test --target $TARGET --features 'const-fn smaller-atomics'
+            cargo test --target $TARGET --release --features 'const-fn smaller-atomics'
 
            export RUSTFLAGS="-Z sanitizer=thread"
            export RUST_TEST_THREADS=1
            export TSAN_OPTIONS="suppressions=$(pwd)/blacklist.txt"
 
            cargo test --test tsan --target $TARGET
-            cargo test --test tsan --target $TARGET --no-default-features
+            cargo test --test tsan --target $TARGET --features 'const-fn smaller-atomics'
            cargo test --test tsan --target $TARGET --release
-            cargo test --test tsan --target $TARGET --release --no-default-features
+            cargo test --test tsan --target $TARGET --release --features 'const-fn smaller-atomics'
        fi
    fi
 }
src/__core.rs

@@ -15,7 +15,7 @@ pub mod mem {
     // workaround to get this to compile on stable ("unions with non-`Copy` fields are unstable")
     #[cfg(not(feature = "const-fn"))]
     pub struct MaybeUninit<T> {
-        value: ManuallyDrop<T>
+        value: ManuallyDrop<T>,
     }
 
     impl<T> MaybeUninit<T> {
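The workaround exists because the real `MaybeUninit` is an untagged union, which stable Rust of that era rejected when the field wasn't `Copy`. A sketch of the two shapes side by side; the union variant is an assumption about the `const-fn` side (inferred from the `untagged_unions` feature gate in lib.rs), not shown in this hunk:

```rust
#![allow(dead_code)]

use core::mem::ManuallyDrop;

// nightly side (hypothetical; gated by `const-fn` in the crate): a real
// untagged union, so the value may genuinely be left uninitialized
pub union MaybeUninitUnion<T> {
    uninit: (),
    value: ManuallyDrop<T>,
}

// stable fallback (the shape shown in this hunk): always stores a value slot;
// `ManuallyDrop` keeps `T`'s destructor from running automatically
pub struct MaybeUninitStruct<T> {
    value: ManuallyDrop<T>,
}

fn main() {}
```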
src/binary_heap.rs

@@ -108,7 +108,7 @@ where
 {
     /* Constructors */
 
-    const_fn!(
+    const_fn! {
         /// Creates an empty BinaryHeap as a $K-heap.
         ///
         /// ```
@@ -124,7 +124,7 @@ where
             data: Vec::new(),
         }
     }
-    );
+    }
 
     /* Public API */
     /// Returns the capacity of the binary heap.
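The `const_fn!(...)` to `const_fn! {...}` change across these files is purely syntactic (brace-delimited macro invocations rustfmt handles better). For orientation, a sketch of the idea behind the crate's internal `const_fn!` helper — a hypothetical body inferred from its call sites, not the crate's exact macro — which emits the wrapped item as `const fn` when the `const-fn` feature is on and as a plain `fn` otherwise:

```rust
// hypothetical reconstruction of the feature-gating helper
macro_rules! const_fn {
    ($(#[$attr:meta])* pub const fn $name:ident() -> $ret:ty $body:block) => {
        #[cfg(feature = "const-fn")]
        $(#[$attr])*
        pub const fn $name() -> $ret $body

        #[cfg(not(feature = "const-fn"))]
        $(#[$attr])*
        pub fn $name() -> $ret $body
    };
}

pub struct Thing;

impl Thing {
    const_fn! {
        /// Constructs a `Thing` (const only with the `const-fn` feature)
        pub const fn new() -> Thing {
            Thing
        }
    }
}

fn main() {
    let _ = Thing::new();
}
```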
src/cfail.rs (16 lines changed)

@@ -5,8 +5,8 @@
 //! Collections of `Send`-able things are `Send`
 //!
 //! ```
-//! use heapless::{RingBuffer, Vec};
-//! use heapless::ring_buffer::{Consumer, Producer};
+//! use heapless::Vec;
+//! use heapless::spsc::{Consumer, Queue, Producer};
 //! use heapless::consts::*;
 //!
 //! struct IsSend;
@@ -17,7 +17,7 @@
 //!
 //! is_send::<Consumer<IsSend, U4>>();
 //! is_send::<Producer<IsSend, U4>>();
-//! is_send::<RingBuffer<IsSend, U4>>();
+//! is_send::<Queue<IsSend, U4>>();
 //! is_send::<Vec<IsSend, U4>>();
 //! ```
 //!
@@ -49,13 +49,13 @@
 //!
 //! ``` compile_fail
 //! use std::marker::PhantomData;
-//! use heapless::RingBuffer;
+//! use heapless::spsc::Queue;
 //!
 //! type NotSend = PhantomData<*const ()>;
 //!
 //! fn is_send<T>() where T: Send {}
 //!
-//! is_send::<RingBuffer<NotSend, [NotSend; 4]>>();
+//! is_send::<Queue<NotSend, [NotSend; 4]>>();
 //! ```
 //!
 //! ``` compile_fail
@@ -71,12 +71,12 @@
 //!
 //! # Freeze
 //!
-//! Splitting a `RingBuffer` should invalidate the original reference.
+//! Splitting a `Queue` should invalidate the original reference.
 //!
 //! ``` compile_fail
-//! use heapless::RingBuffer;
+//! use heapless::spsc::Queue;
 //!
-//! let mut rb: RingBuffer<u8, [u8; 4]> = RingBuffer::new();
+//! let mut rb: Queue<u8, [u8; 4]> = Queue::new();
 //!
 //! let (p, c) = rb.split();
 //! rb.enqueue(0).unwrap();
src/indexmap.rs

@@ -131,9 +131,9 @@ where
             if dist > entry_hash.probe_distance(Self::mask(), probe) {
                 // give up when probe distance is too long
                 return None;
-            } else if entry_hash == hash && unsafe {
-                self.entries.get_unchecked(i).key.borrow() == query
-            } {
+            } else if entry_hash == hash
+                && unsafe { self.entries.get_unchecked(i).key.borrow() == query }
+            {
                 return Some((probe, i));
             }
         } else {
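For context, the early return implements the Robin Hood hashing invariant: entries sit ordered by displacement from their ideal slot, so once our probe distance exceeds the resident entry's, the key cannot appear further along. A standalone sketch of the distance computation (hypothetical helper; the crate's `probe_distance` method is not shown in this hunk):

```rust
// displacement of an entry found at `current` whose ideal slot is `hash & mask`
// (power-of-two table, so `& mask` is the index reduction)
fn probe_distance(mask: usize, hash: usize, current: usize) -> usize {
    current.wrapping_sub(hash) & mask
}

fn main() {
    let mask = 0b111; // 8-slot table
    // ideal slot 5, entry landed at slot 1 after wrapping: distance 4
    assert_eq!(probe_distance(mask, 5, 1), 4);
}
```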
src/lib.rs

@@ -78,14 +78,12 @@
 //! This way they can be used to initialize static memory at compile time.
 //!
 
-#![allow(warnings)]
 #![deny(missing_docs)]
 #![deny(warnings)]
 #![cfg_attr(feature = "const-fn", feature(const_fn))]
+#![cfg_attr(feature = "const-fn", feature(const_manually_drop_new))]
 #![cfg_attr(feature = "const-fn", feature(untagged_unions))]
 #![cfg_attr(feature = "smaller-atomics", feature(core_intrinsics))]
 #![cfg_attr(feature = "smaller-atomics", feature(integer_atomics))]
 #![no_std]
 
 extern crate generic_array;
@@ -102,7 +100,6 @@ pub use generic_array::ArrayLength;
 pub use indexmap::{FnvIndexMap, IndexMap};
 pub use indexset::{FnvIndexSet, IndexSet};
 pub use linear_map::LinearMap;
-pub use ring_buffer::RingBuffer;
 pub use string::String;
 pub use vec::Vec;
 
@@ -114,6 +111,7 @@ mod string;
 mod vec;
 
 pub mod binary_heap;
-pub mod ring_buffer;
+pub mod spsc;
 
 mod __core;
+mod sealed;
|
@ -21,8 +21,7 @@ where
|
||||
N: ArrayLength<(K, V)>,
|
||||
K: Eq,
|
||||
{
|
||||
|
||||
const_fn!(
|
||||
const_fn! {
|
||||
/// Creates an empty `LinearMap`
|
||||
///
|
||||
/// # Examples
|
||||
@ -36,7 +35,7 @@ where
|
||||
pub const fn new() -> Self {
|
||||
LinearMap { buffer: Vec::new() }
|
||||
}
|
||||
);
|
||||
}
|
||||
|
||||
/// Returns the number of elements that the map can hold
|
||||
///
|
||||
@ -309,7 +308,8 @@ where
|
||||
K: Borrow<Q>,
|
||||
Q: Eq + ?Sized,
|
||||
{
|
||||
let idx = self.keys()
|
||||
let idx = self
|
||||
.keys()
|
||||
.enumerate()
|
||||
.find(|&(_, k)| k.borrow() == key)
|
||||
.map(|(idx, _)| idx);
|
||||
@ -443,7 +443,6 @@ where
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
#[cfg(feature = "const-fn")] // Remove this if there are more tests
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
@ -453,7 +452,7 @@ mod test {
|
||||
#[cfg(feature = "const-fn")]
|
||||
#[test]
|
||||
fn static_new() {
|
||||
static mut _L: LinearMap<i32, i32, U8>= LinearMap::new();
|
||||
static mut _L: LinearMap<i32, i32, U8> = LinearMap::new();
|
||||
}
|
||||
|
||||
}
|
||||
|
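For orientation, a quick `LinearMap` usage sketch on stable (default features); lookups and removals are linear scans over the backing `Vec`, which is what the `keys().enumerate().find(...)` chain above implements:

```rust
use heapless::consts::*;
use heapless::LinearMap;

fn main() {
    let mut map: LinearMap<&str, i32, U8> = LinearMap::new();
    map.insert("answer", 42).unwrap();
    assert_eq!(map.get("answer"), Some(&42));
    assert_eq!(map.remove("answer"), Some(42));
}
```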
src/sealed.rs (new file, 83 lines)

@@ -0,0 +1,83 @@
#[cfg(feature = "smaller-atomics")]
use core::sync::atomic::{AtomicU16, AtomicU8};
use core::sync::atomic::{AtomicUsize, Ordering};

pub unsafe trait Uxx: Into<usize> + Send {
    #[doc(hidden)]
    fn truncate(x: usize) -> Self;

    #[doc(hidden)]
    fn load_acquire(x: *const Self) -> Self;

    #[doc(hidden)]
    fn load_relaxed(x: *const Self) -> Self;

    #[doc(hidden)]
    fn store_release(x: *const Self, val: Self);
}

#[cfg(feature = "smaller-atomics")]
unsafe impl Uxx for u8 {
    fn truncate(x: usize) -> Self {
        let max = ::core::u8::MAX;
        if x >= usize::from(max) {
            max
        } else {
            x as u8
        }
    }

    fn load_acquire(x: *const Self) -> Self {
        unsafe { (*(x as *const AtomicU8)).load(Ordering::Acquire) }
    }

    fn load_relaxed(x: *const Self) -> Self {
        unsafe { (*(x as *const AtomicU8)).load(Ordering::Relaxed) }
    }

    fn store_release(x: *const Self, val: Self) {
        unsafe { (*(x as *const AtomicU8)).store(val, Ordering::Release) }
    }
}

#[cfg(feature = "smaller-atomics")]
unsafe impl Uxx for u16 {
    fn truncate(x: usize) -> Self {
        let max = ::core::u16::MAX;
        if x >= usize::from(max) {
            max
        } else {
            x as u16
        }
    }

    fn load_acquire(x: *const Self) -> Self {
        unsafe { (*(x as *const AtomicU16)).load(Ordering::Acquire) }
    }

    fn load_relaxed(x: *const Self) -> Self {
        unsafe { (*(x as *const AtomicU16)).load(Ordering::Relaxed) }
    }

    fn store_release(x: *const Self, val: Self) {
        unsafe { (*(x as *const AtomicU16)).store(val, Ordering::Release) }
    }
}

unsafe impl Uxx for usize {
    fn truncate(x: usize) -> Self {
        x
    }

    fn load_acquire(x: *const Self) -> Self {
        unsafe { (*(x as *const AtomicUsize)).load(Ordering::Acquire) }
    }

    fn load_relaxed(x: *const Self) -> Self {
        unsafe { (*(x as *const AtomicUsize)).load(Ordering::Relaxed) }
    }

    fn store_release(x: *const Self, val: Self) {
        unsafe { (*(x as *const AtomicUsize)).store(val, Ordering::Release) }
    }
}
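Note that `truncate` now saturates at `U::MAX` rather than the old `U::MAX - 1` (the removed `ring_buffer` version below reserved one slot). A standalone worked example mirroring the `u8` impl above:

```rust
// mirrors `<u8 as Uxx>::truncate`: capacities larger than 255 saturate,
// which is why a `Queue<_, _, u8>` holds at most 255 elements
fn truncate_u8(x: usize) -> u8 {
    let max = u8::MAX; // 255
    if x >= usize::from(max) {
        max
    } else {
        x as u8
    }
}

fn main() {
    assert_eq!(truncate_u8(300), 255); // saturated
    assert_eq!(truncate_u8(64), 64);   // unchanged
}
```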
src/ring_buffer/mod.rs → src/spsc/mod.rs

@@ -1,134 +1,36 @@
-//! Ring buffer
+//! Single producer single consumer queue
 
 use core::cell::UnsafeCell;
-#[cfg(feature = "smaller-atomics")]
-use core::intrinsics;
-use core::ops::Add;
 use core::ptr;
-#[cfg(not(feature = "smaller-atomics"))]
-use core::sync::atomic::{AtomicUsize, Ordering};
 
-use generic_array::typenum::{Sum, Unsigned, U1};
 use generic_array::{ArrayLength, GenericArray};
 
-pub use self::spsc::{Consumer, Producer};
+pub use self::split::{Consumer, Producer};
 use __core::mem::MaybeUninit;
+use sealed;
 
-mod spsc;
-
-/// Types that can be used as `RingBuffer` indices: `u8`, `u16` and `usize`
-///
-/// This trait is sealed and cannot be implemented outside of `heapless`.
-pub unsafe trait Uxx: Into<usize> + Send + private::Sealed {
-    #[doc(hidden)]
-    fn truncate(x: usize) -> Self;
-
-    #[cfg(feature = "smaller-atomics")]
-    #[doc(hidden)]
-    fn load_acquire(x: *mut Self) -> Self {
-        unsafe { intrinsics::atomic_load_acq(x) }
-    }
-
-    #[cfg(not(feature = "smaller-atomics"))]
-    #[doc(hidden)]
-    fn load_acquire(x: *mut Self) -> Self;
-
-    #[cfg(feature = "smaller-atomics")]
-    #[doc(hidden)]
-    fn load_relaxed(x: *mut Self) -> Self {
-        unsafe { intrinsics::atomic_load_relaxed(x) }
-    }
-
-    #[cfg(not(feature = "smaller-atomics"))]
-    #[doc(hidden)]
-    fn load_relaxed(x: *mut Self) -> Self;
-
-    #[cfg(feature = "smaller-atomics")]
-    #[doc(hidden)]
-    fn store_release(x: *mut Self, val: Self) {
-        unsafe { intrinsics::atomic_store_rel(x, val) }
-    }
-
-    #[cfg(not(feature = "smaller-atomics"))]
-    #[doc(hidden)]
-    fn store_release(x: *mut Self, val: Self);
-}
-
-mod private {
-    pub trait Sealed {}
-
-    impl Sealed for usize {}
-    #[cfg(feature = "smaller-atomics")]
-    impl Sealed for u8 {}
-    #[cfg(feature = "smaller-atomics")]
-    impl Sealed for u16 {}
-}
-
-#[cfg(feature = "smaller-atomics")]
-unsafe impl Uxx for u8 {
-    fn truncate(x: usize) -> Self {
-        let max = ::core::u8::MAX;
-        if x >= usize::from(max) {
-            max - 1
-        } else {
-            x as u8
-        }
-    }
-}
-
-#[cfg(feature = "smaller-atomics")]
-unsafe impl Uxx for u16 {
-    fn truncate(x: usize) -> Self {
-        let max = ::core::u16::MAX;
-        if x >= usize::from(max) {
-            max - 1
-        } else {
-            x as u16
-        }
-    }
-}
-
-unsafe impl Uxx for usize {
-    fn truncate(x: usize) -> Self {
-        x
-    }
-
-    #[cfg(not(feature = "smaller-atomics"))]
-    fn load_acquire(x: *mut Self) -> Self {
-        unsafe { (*(x as *mut AtomicUsize)).load(Ordering::Acquire) }
-    }
-
-    #[cfg(not(feature = "smaller-atomics"))]
-    fn load_relaxed(x: *mut Self) -> Self {
-        unsafe { (*(x as *mut AtomicUsize)).load(Ordering::Relaxed) }
-    }
-
-    #[cfg(not(feature = "smaller-atomics"))]
-    fn store_release(x: *mut Self, val: Self) {
-        unsafe {
-            (*(x as *mut AtomicUsize)).store(val, Ordering::Release);
-        }
-    }
-}
+mod split;
 
 // Atomic{U8,U16, Usize} with no CAS operations that works on targets that have "no atomic support"
 // according to their specification
 struct Atomic<U>
 where
-    U: Uxx,
+    U: sealed::Uxx,
 {
     v: UnsafeCell<U>,
 }
 
 impl<U> Atomic<U>
 where
-    U: Uxx,
+    U: sealed::Uxx,
 {
-    const_fn!(const fn new(v: U) -> Atomic<U> {
-        Atomic {
-            v: UnsafeCell::new(v),
+    const_fn! {
+        const fn new(v: U) -> Atomic<U> {
+            Atomic {
+                v: UnsafeCell::new(v),
+            }
         }
-    });
+    }
 
     fn get_mut(&mut self) -> &mut U {
         unsafe { &mut *self.v.get() }
@@ -147,28 +49,35 @@ where
     }
 }
 
-/// A statically allocated ring buffer with a capacity of `N`
+/// A statically allocated single producer single consumer queue with a capacity of `N` elements
 ///
-/// By default `RingBuffer` will use `usize` integers to hold the indices to its head and tail. For
-/// small ring buffers `usize` may be overkill. However, `RingBuffer`'s index type is generic and
-/// can be changed to `u8` or `u16` to reduce its footprint. The easiest to construct a `RingBuffer`
-/// with a smaller index type is to use the [`u8`] and [`u16`] constructors.
+/// *IMPORTANT*: To get better performance use a capacity that is a power of 2 (e.g. `U16`, `U32`,
+/// etc.).
 ///
-/// [`u8`]: struct.RingBuffer.html#method.u8
-/// [`u16`]: struct.RingBuffer.html#method.u16
+/// By default `spsc::Queue` will use `usize` integers to hold the indices to its head and tail. For
+/// small queues `usize` indices may be overkill. However, `spsc::Queue`'s index type is generic and
+/// can be changed to `u8` or `u16` to reduce its footprint. The easiest to construct a
+/// `spsc::Queue` with a smaller index type is to use the [`u8`] and [`u16`] constructors.
+///
+/// [`u8`]: struct.Queue.html#method.u8
+/// [`u16`]: struct.Queue.html#method.u16
+///
+/// *IMPORTANT*: `spsc::Queue<_, _, u8>` has a maximum capacity of 255 elements; `spsc::Queue<_, _,
+/// u16>` has a maximum capacity of 65535 elements.
 ///
 /// # Examples
 ///
 /// ```
-/// use heapless::RingBuffer;
+/// use heapless::spsc::Queue;
 /// use heapless::consts::*;
 ///
-/// let mut rb: RingBuffer<u8, U3> = RingBuffer::new();
+/// let mut rb: Queue<u8, U4> = Queue::new();
 ///
 /// assert!(rb.enqueue(0).is_ok());
 /// assert!(rb.enqueue(1).is_ok());
 /// assert!(rb.enqueue(2).is_ok());
-/// assert!(rb.enqueue(3).is_err()); // full
+/// assert!(rb.enqueue(3).is_ok());
+/// assert!(rb.enqueue(4).is_err()); // full
 ///
 /// assert_eq!(rb.dequeue(), Some(0));
 /// ```
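Why the power-of-two advice in the new doc comment: the free-running indices are now reduced with `% cap` on every access, and for `cap = 2^k` the compiler lowers that to a bitwise AND. A standalone sketch (hypothetical helper, not crate API):

```rust
fn index_pow2(counter: usize, cap: usize) -> usize {
    debug_assert!(cap.is_power_of_two());
    counter & (cap - 1) // same result as `counter % cap`, without a division
}

fn main() {
    assert_eq!(index_pow2(13, 8), 13 % 8);
}
```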
@@ -176,17 +85,17 @@ where
 /// ### Single producer single consumer mode
 ///
 /// ```
-/// use heapless::RingBuffer;
+/// use heapless::spsc::Queue;
 /// use heapless::consts::*;
 ///
-/// // static mut RB: RingBuffer<Event, U4> = RingBuffer::new(); // requires feature `const-fn`
+/// // static mut RB: Queue<Event, U4> = Queue::new(); // requires feature `const-fn`
 ///
-/// static mut RB: Option<RingBuffer<Event, U4>> = None;
+/// static mut RB: Option<Queue<Event, U4>> = None;
 ///
 /// enum Event { A, B }
 ///
 /// fn main() {
-///     unsafe { RB = Some(RingBuffer::new()) };
+///     unsafe { RB = Some(Queue::new()) };
 ///     // NOTE(unsafe) beware of aliasing the `consumer` end point
 ///     let mut consumer = unsafe { RB.as_mut().unwrap().split().1 };
 ///
@@ -218,11 +127,10 @@ where
 ///     // ..
 /// }
 /// ```
-pub struct RingBuffer<T, N, U = usize>
+pub struct Queue<T, N, U = usize>
 where
-    N: Add<U1> + Unsigned,
-    Sum<N, U1>: ArrayLength<T>,
-    U: Uxx,
+    N: ArrayLength<T>,
+    U: sealed::Uxx,
 {
     // this is from where we dequeue items
     head: Atomic<U>,
@@ -230,21 +138,20 @@ where
     // this is where we enqueue new items
     tail: Atomic<U>,
 
-    buffer: MaybeUninit<GenericArray<T, Sum<N, U1>>>,
+    buffer: MaybeUninit<GenericArray<T, N>>,
 }
 
-impl<T, N, U> RingBuffer<T, N, U>
+impl<T, N, U> Queue<T, N, U>
 where
-    N: Add<U1> + Unsigned,
-    Sum<N, U1>: ArrayLength<T>,
-    U: Uxx,
+    N: ArrayLength<T>,
+    U: sealed::Uxx,
 {
-    /// Returns the maximum number of elements the ring buffer can hold
+    /// Returns the maximum number of elements the queue can hold
     pub fn capacity(&self) -> U {
         U::truncate(N::to_usize())
     }
 
-    /// Returns `true` if the ring buffer has a length of 0
+    /// Returns `true` if the queue has a length of 0
     pub fn is_empty(&self) -> bool {
         self.len_usize() == 0
     }
@@ -272,19 +179,14 @@ where
         let head = self.head.load_relaxed().into();
         let tail = self.tail.load_relaxed().into();
 
-        if head > tail {
-            self.capacity().into() + 1 - head + tail
-        } else {
-            tail - head
-        }
+        tail.wrapping_sub(head)
     }
 }
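The simplification works because head and tail are now free-running counters, only reduced modulo the capacity at access time; with unsigned wrapping arithmetic, `tail - head` remains the element count even after the counters overflow. A worked check with `u8` indices (hypothetical values):

```rust
fn main() {
    // capacity 4; the producer has enqueued 257 items total, the consumer
    // has dequeued 253: the u8 counters have wrapped past 255
    let head: u8 = 253;
    let tail: u8 = 253u8.wrapping_add(4); // 1, after wrap-around
    assert_eq!(tail.wrapping_sub(head), 4); // length is still correct
}
```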
-impl<T, N, U> Drop for RingBuffer<T, N, U>
+impl<T, N, U> Drop for Queue<T, N, U>
 where
-    N: Add<U1> + Unsigned,
-    Sum<N, U1>: ArrayLength<T>,
-    U: Uxx,
+    N: ArrayLength<T>,
+    U: sealed::Uxx,
 {
     fn drop(&mut self) {
         for item in self {
@@ -295,11 +197,10 @@ where
     }
 }
 
-impl<'a, T, N, U> IntoIterator for &'a RingBuffer<T, N, U>
+impl<'a, T, N, U> IntoIterator for &'a Queue<T, N, U>
 where
-    N: Add<U1> + Unsigned,
-    Sum<N, U1>: ArrayLength<T>,
-    U: Uxx,
+    N: ArrayLength<T>,
+    U: sealed::Uxx,
 {
     type Item = &'a T;
     type IntoIter = Iter<'a, T, N, U>;
@@ -309,11 +210,10 @@ where
     }
 }
 
-impl<'a, T, N, U> IntoIterator for &'a mut RingBuffer<T, N, U>
+impl<'a, T, N, U> IntoIterator for &'a mut Queue<T, N, U>
 where
-    N: Add<U1> + Unsigned,
-    Sum<N, U1>: ArrayLength<T>,
-    U: Uxx,
+    N: ArrayLength<T>,
+    U: sealed::Uxx,
 {
     type Item = &'a mut T;
     type IntoIter = IterMut<'a, T, N, U>;
@@ -325,25 +225,24 @@ where
 
 macro_rules! impl_ {
     ($uxx:ident) => {
-        impl<T, N> RingBuffer<T, N, $uxx>
+        impl<T, N> Queue<T, N, $uxx>
         where
-            N: Add<U1> + Unsigned,
-            Sum<N, U1>: ArrayLength<T>,
+            N: ArrayLength<T>,
         {
-            const_fn!(
-                /// Creates an empty ring buffer with a fixed capacity of `N`
+            const_fn! {
+                /// Creates an empty queue with a fixed capacity of `N`
                 pub const fn $uxx() -> Self {
-                    RingBuffer {
+                    Queue {
                         buffer: unsafe { MaybeUninit::uninitialized() },
                         head: Atomic::new(0),
                         tail: Atomic::new(0),
                     }
                 }
-            );
+            }
 
             /// Returns the item in the front of the queue, or `None` if the queue is empty
             pub fn dequeue(&mut self) -> Option<T> {
-                let n = self.capacity() + 1;
+                let cap = self.capacity();
 
                 let head = self.head.get_mut();
                 let tail = self.tail.get_mut();
@@ -351,8 +250,8 @@ macro_rules! impl_ {
                 let buffer = unsafe { self.buffer.get_ref() };
 
                 if *head != *tail {
-                    let item = unsafe { ptr::read(buffer.get_unchecked(usize::from(*head))) };
-                    *head = (*head + 1) % n;
+                    let item = unsafe { ptr::read(buffer.get_unchecked(usize::from(*head % cap))) };
+                    *head = head.wrapping_add(1);
                     Some(item)
                 } else {
                     None
@@ -363,38 +262,37 @@ macro_rules! impl_ {
             ///
             /// Returns back the `item` if the queue is full
             pub fn enqueue(&mut self, item: T) -> Result<(), T> {
-                let n = self.capacity() + 1;
-
+                let cap = self.capacity();
                 let head = *self.head.get_mut();
                 let tail = *self.tail.get_mut();
 
-                let next_tail = (tail + 1) % n;
-                if next_tail != head {
-                    self.enqueue_unchecked(item);
-                    Ok(())
-                } else {
+                if tail.wrapping_sub(head) > cap - 1 {
                     Err(item)
+                } else {
+                    unsafe { self.enqueue_unchecked(item) }
+                    Ok(())
                 }
             }
 
             /// Adds an `item` to the end of the queue, without checking if it's full
             ///
-            /// **WARNING** If the queue is full this operation will make the queue appear empty to
-            /// the `Consumer`, thus *leaking* (destructors won't run) all the elements that were in
-            /// the queue.
-            pub fn enqueue_unchecked(&mut self, item: T) {
-                let n = self.capacity() + 1;
-
+            /// # Unsafety
+            ///
+            /// If the queue is full this operation will leak a value (T's destructor won't run on
+            /// the value that got overwritten by `item`), *and* will allow the `dequeue` operation
+            /// to create a copy of `item`, which could result in `T`'s destructor running on `item`
+            /// twice.
+            pub unsafe fn enqueue_unchecked(&mut self, item: T) {
+                let cap = self.capacity();
                 let tail = self.tail.get_mut();
 
-                let buffer = unsafe { self.buffer.get_mut() };
+                let buffer = self.buffer.get_mut();
 
-                let next_tail = (*tail + 1) % n;
                 // NOTE(ptr::write) the memory slot that we are about to write to is
                 // uninitialized. We use `ptr::write` to avoid running `T`'s destructor on the
                 // uninitialized memory
-                unsafe { ptr::write(buffer.get_unchecked_mut(usize::from(*tail)), item) }
-                *tail = next_tail;
+                ptr::write(buffer.get_unchecked_mut(usize::from(*tail % cap)), item);
+                *tail = tail.wrapping_add(1);
             }
 
             /// Returns the number of elements in the queue
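Since `enqueue_unchecked` is now `unsafe`, callers take on the not-full invariant themselves. A usage sketch of the intended pattern on the producer side (mirroring the `split` API shown further down):

```rust
use heapless::consts::*;
use heapless::spsc::Queue;

fn main() {
    let mut q: Queue<u8, U4> = Queue::new();
    let (mut p, mut c) = q.split();

    if p.ready() {
        // safe: `ready()` just confirmed there is a free slot
        unsafe { p.enqueue_unchecked(42) }
    }
    assert_eq!(c.dequeue(), Some(42));
}
```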
@@ -403,7 +301,7 @@ macro_rules! impl_ {
                 let tail = self.tail.load_relaxed();
 
                 if head > tail {
-                    self.capacity() + 1 - head + tail
+                    tail.wrapping_sub(head)
                 } else {
                     tail - head
                 }
@@ -412,17 +310,16 @@ macro_rules! impl_ {
     };
 }
 
-impl<T, N> RingBuffer<T, N, usize>
+impl<T, N> Queue<T, N, usize>
 where
-    N: Add<U1> + Unsigned,
-    Sum<N, U1>: ArrayLength<T>,
+    N: ArrayLength<T>,
 {
-    const_fn!(
-        /// Alias for [`RingBuffer::usize`](struct.RingBuffer.html#method.usize)
+    const_fn! {
+        /// Alias for [`spsc::Queue::usize`](struct.Queue.html#method.usize)
         pub const fn new() -> Self {
-            RingBuffer::usize()
+            Queue::usize()
         }
-    );
+    }
 }
 
 #[cfg(feature = "smaller-atomics")]
@@ -431,28 +328,26 @@ impl_!(u8);
 impl_!(u16);
 impl_!(usize);
 
-/// An iterator over a ring buffer items
+/// An iterator over the items of a queue
 pub struct Iter<'a, T, N, U>
 where
-    N: Add<U1> + Unsigned + 'a,
-    Sum<N, U1>: ArrayLength<T>,
+    N: ArrayLength<T> + 'a,
     T: 'a,
-    U: 'a + Uxx,
+    U: 'a + sealed::Uxx,
 {
-    rb: &'a RingBuffer<T, N, U>,
+    rb: &'a Queue<T, N, U>,
     index: usize,
     len: usize,
 }
 
-/// A mutable iterator over a ring buffer items
+/// A mutable iterator over the items of a queue
 pub struct IterMut<'a, T, N, U>
 where
-    N: Add<U1> + Unsigned + 'a,
-    Sum<N, U1>: ArrayLength<T>,
+    N: ArrayLength<T> + 'a,
     T: 'a,
-    U: 'a + Uxx,
+    U: 'a + sealed::Uxx,
 {
-    rb: &'a mut RingBuffer<T, N, U>,
+    rb: &'a mut Queue<T, N, U>,
     index: usize,
     len: usize,
 }
@@ -461,10 +356,9 @@ macro_rules! iterator {
     (struct $name:ident -> $elem:ty, $ptr:ty, $asref:ident, $asptr:ident, $mkref:ident) => {
         impl<'a, T, N, U> Iterator for $name<'a, T, N, U>
         where
-            N: Add<U1> + Unsigned,
-            Sum<N, U1>: ArrayLength<T>,
+            N: ArrayLength<T>,
             T: 'a,
-            U: 'a + Uxx,
+            U: 'a + sealed::Uxx,
         {
             type Item = $elem;
 
@@ -472,10 +366,10 @@ macro_rules! iterator {
                 if self.index < self.len {
                     let head = self.rb.head.load_relaxed().into();
 
-                    let capacity = self.rb.capacity().into() + 1;
+                    let cap = self.rb.capacity().into();
                     let buffer = unsafe { self.rb.buffer.$asref() };
                     let ptr: $ptr = buffer.$asptr();
-                    let i = (head + self.index) % capacity;
+                    let i = (head + self.index) % cap;
                     self.index += 1;
                     Some(unsafe { $mkref!(*ptr.offset(i as isize)) })
                 } else {
@@ -504,12 +398,12 @@ iterator!(struct IterMut -> &'a mut T, *mut T, get_mut, as_mut_ptr, make_ref_mut
 #[cfg(test)]
 mod tests {
     use consts::*;
-    use RingBuffer;
+    use spsc::Queue;
 
     #[cfg(feature = "const-fn")]
     #[test]
     fn static_new() {
-        static mut _R: RingBuffer<i32, U4> = RingBuffer::new();
+        static mut _Q: Queue<i32, U4> = Queue::new();
     }
 
     #[test]
@@ -535,7 +429,7 @@ mod tests {
         static mut COUNT: i32 = 0;
 
         {
-            let mut v: RingBuffer<Droppable, U4> = RingBuffer::new();
+            let mut v: Queue<Droppable, U4> = Queue::new();
             v.enqueue(Droppable::new()).ok().unwrap();
             v.enqueue(Droppable::new()).ok().unwrap();
             v.dequeue().unwrap();
@@ -544,7 +438,7 @@ mod tests {
         assert_eq!(unsafe { COUNT }, 0);
 
         {
-            let mut v: RingBuffer<Droppable, U4> = RingBuffer::new();
+            let mut v: Queue<Droppable, U4> = Queue::new();
             v.enqueue(Droppable::new()).ok().unwrap();
             v.enqueue(Droppable::new()).ok().unwrap();
         }
@@ -554,18 +448,19 @@ mod tests {
 
     #[test]
     fn full() {
-        let mut rb: RingBuffer<i32, U3> = RingBuffer::new();
+        let mut rb: Queue<i32, U4> = Queue::new();
 
         rb.enqueue(0).unwrap();
         rb.enqueue(1).unwrap();
         rb.enqueue(2).unwrap();
+        rb.enqueue(3).unwrap();
 
-        assert!(rb.enqueue(3).is_err());
+        assert!(rb.enqueue(4).is_err());
     }
 
     #[test]
     fn iter() {
-        let mut rb: RingBuffer<i32, U4> = RingBuffer::new();
+        let mut rb: Queue<i32, U4> = Queue::new();
 
         rb.enqueue(0).unwrap();
         rb.enqueue(1).unwrap();
@@ -581,7 +476,7 @@ mod tests {
 
     #[test]
     fn iter_mut() {
-        let mut rb: RingBuffer<i32, U4> = RingBuffer::new();
+        let mut rb: Queue<i32, U4> = Queue::new();
 
         rb.enqueue(0).unwrap();
         rb.enqueue(1).unwrap();
@@ -597,7 +492,7 @@ mod tests {
 
     #[test]
     fn sanity() {
-        let mut rb: RingBuffer<i32, U4> = RingBuffer::new();
+        let mut rb: Queue<i32, U4> = Queue::new();
 
         assert_eq!(rb.dequeue(), None);
 
@@ -611,9 +506,9 @@ mod tests {
     #[test]
     #[cfg(feature = "smaller-atomics")]
     fn u8() {
-        let mut rb: RingBuffer<u8, U256, _> = RingBuffer::u8();
+        let mut rb: Queue<u8, U256, _> = Queue::u8();
 
-        for _ in 0..254 {
+        for _ in 0..255 {
             rb.enqueue(0).unwrap();
         }
 
@@ -622,7 +517,7 @@ mod tests {
 
     #[test]
     fn wrap_around() {
-        let mut rb: RingBuffer<i32, U3> = RingBuffer::new();
+        let mut rb: Queue<i32, U3> = Queue::new();
 
         rb.enqueue(0).unwrap();
         rb.enqueue(1).unwrap();
@@ -638,7 +533,7 @@ mod tests {
 
     #[test]
     fn ready_flag() {
-        let mut rb: RingBuffer<i32, U2> = RingBuffer::new();
+        let mut rb: Queue<i32, U2> = Queue::new();
         let (mut p, mut c) = rb.split();
         assert_eq!(c.ready(), false);
         assert_eq!(p.ready(), true);
src/ring_buffer/spsc.rs → src/spsc/split.rs

@@ -1,19 +1,17 @@
 use core::marker::PhantomData;
-use core::ops::Add;
 use core::ptr::{self, NonNull};
 
-use generic_array::typenum::{Sum, Unsigned, U1};
 use generic_array::ArrayLength;
 
-use ring_buffer::{RingBuffer, Uxx};
+use sealed;
+use spsc::Queue;
 
-impl<T, N, U> RingBuffer<T, N, U>
+impl<T, N, U> Queue<T, N, U>
 where
-    N: Add<U1> + Unsigned,
-    Sum<N, U1>: ArrayLength<T>,
-    U: Uxx,
+    N: ArrayLength<T>,
+    U: sealed::Uxx,
 {
-    /// Splits a statically allocated ring buffer into producer and consumer end points
+    /// Splits a statically allocated queue into producer and consumer end points
     pub fn split<'rb>(&'rb mut self) -> (Producer<'rb, T, N, U>, Consumer<'rb, T, N, U>) {
         (
             Producer {
@@ -28,54 +26,47 @@ where
         }
     }
 }
 
-/// A ring buffer "consumer"; it can dequeue items from the ring buffer
-// NOTE the consumer semantically owns the `head` pointer of the ring buffer
+/// A queue "consumer"; it can dequeue items from the queue
+// NOTE the consumer semantically owns the `head` pointer of the queue
 pub struct Consumer<'a, T, N, U = usize>
 where
-    N: Add<U1> + Unsigned,
-    Sum<N, U1>: ArrayLength<T>,
-    U: Uxx,
+    N: ArrayLength<T>,
+    U: sealed::Uxx,
 {
     // XXX do we need to use `NonNull` (for soundness) here?
-    rb: NonNull<RingBuffer<T, N, U>>,
+    rb: NonNull<Queue<T, N, U>>,
     _marker: PhantomData<&'a ()>,
 }
 
 unsafe impl<'a, T, N, U> Send for Consumer<'a, T, N, U>
 where
-    N: Add<U1> + Unsigned,
-    Sum<N, U1>: ArrayLength<T>,
+    N: ArrayLength<T>,
     T: Send,
-    U: Uxx,
+    U: sealed::Uxx,
 {}
 
-/// A ring buffer "producer"; it can enqueue items into the ring buffer
-// NOTE the producer semantically owns the `tail` pointer of the ring buffer
+/// A queue "producer"; it can enqueue items into the queue
+// NOTE the producer semantically owns the `tail` pointer of the queue
 pub struct Producer<'a, T, N, U = usize>
 where
-    N: Add<U1> + Unsigned,
-    Sum<N, U1>: ArrayLength<T>,
-    U: Uxx,
+    N: ArrayLength<T>,
+    U: sealed::Uxx,
 {
     // XXX do we need to use `NonNull` (for soundness) here?
-    rb: NonNull<RingBuffer<T, N, U>>,
+    rb: NonNull<Queue<T, N, U>>,
     _marker: PhantomData<&'a ()>,
 }
 
 unsafe impl<'a, T, N, U> Send for Producer<'a, T, N, U>
 where
-    N: Add<U1> + Unsigned,
-    Sum<N, U1>: ArrayLength<T>,
+    N: ArrayLength<T>,
     T: Send,
-    U: Uxx,
+    U: sealed::Uxx,
 {}
 
 macro_rules! impl_ {
     ($uxx:ident) => {
         impl<'a, T, N> Consumer<'a, T, N, $uxx>
         where
-            N: Add<U1> + Unsigned,
-            Sum<N, U1>: ArrayLength<T>,
+            N: ArrayLength<T>,
         {
             /// Returns if there are any items to dequeue. When this returns true, at least the
             /// first subsequent dequeue will succeed.
@@ -111,24 +102,24 @@ macro_rules! impl_ {
             unsafe fn _dequeue(&mut self, head: $uxx) -> T {
                 let rb = self.rb.as_ref();
 
-                let n = rb.capacity() + 1;
+                let cap = rb.capacity();
                 let buffer = rb.buffer.get_ref();
 
-                let item = ptr::read(buffer.get_unchecked(usize::from(head)));
-                rb.head.store_release((head + 1) % n);
+                let item = ptr::read(buffer.get_unchecked(usize::from(head % cap)));
+                rb.head.store_release(head.wrapping_add(1));
                 item
             }
         }
 
         impl<'a, T, N> Producer<'a, T, N, $uxx>
         where
-            N: Add<U1> + Unsigned,
-            Sum<N, U1>: ArrayLength<T>,
+            N: ArrayLength<T>,
         {
             /// Returns if there is any space to enqueue a new item. When this returns true, at
             /// least the first subsequent enqueue will succeed.
             pub fn ready(&self) -> bool {
-                let n = unsafe { self.rb.as_ref().capacity() + 1 };
+                let cap = unsafe { self.rb.as_ref().capacity() };
 
                 let tail = unsafe { self.rb.as_ref().tail.load_relaxed() };
                 // NOTE we could replace this `load_acquire` with a `load_relaxed` and this method
                 // would be sound on most architectures but that change would result in UB according
@@ -136,15 +127,14 @@ macro_rules! impl_ {
                 // of caution and stick to `load_acquire`. Check issue google#sanitizers#882 for
                 // more details.
                 let head = unsafe { self.rb.as_ref().head.load_acquire() };
-                let next_tail = (tail + 1) % n;
-                return next_tail != head;
+                return head.wrapping_add(cap) != tail;
             }
 
             /// Adds an `item` to the end of the queue
             ///
             /// Returns back the `item` if the queue is full
             pub fn enqueue(&mut self, item: T) -> Result<(), T> {
-                let n = unsafe { self.rb.as_ref().capacity() + 1 };
+                let cap = unsafe { self.rb.as_ref().capacity() };
                 let tail = unsafe { self.rb.as_ref().tail.load_relaxed() };
                 // NOTE we could replace this `load_acquire` with a `load_relaxed` and this method
                 // would be sound on most architectures but that change would result in UB according
@@ -152,43 +142,40 @@ macro_rules! impl_ {
                 // of caution and stick to `load_acquire`. Check issue google#sanitizers#882 for
                 // more details.
                 let head = unsafe { self.rb.as_ref().head.load_acquire() };
-                let next_tail = (tail + 1) % n;
-                if next_tail != head {
+
+                if tail.wrapping_sub(head) > cap - 1 {
+                    Err(item)
+                } else {
                     unsafe { self._enqueue(tail, item) };
                     Ok(())
-                } else {
-                    Err(item)
                 }
             }
 
             /// Adds an `item` to the end of the queue without checking if it's full
             ///
-            /// **WARNING** If the queue is full this operation will make the queue appear empty to
-            /// the `Consumer`, thus *leaking* (destructors won't run) all the elements that were in
-            /// the queue.
-            pub fn enqueue_unchecked(&mut self, item: T) {
-                unsafe {
-                    let tail = self.rb.as_ref().tail.load_relaxed();
-                    debug_assert_ne!(
-                        (tail + 1) % (self.rb.as_ref().capacity() + 1),
-                        self.rb.as_ref().head.load_acquire()
-                    );
-                    self._enqueue(tail, item);
-                }
+            /// # Unsafety
+            ///
+            /// If the queue is full this operation will leak a value (T's destructor won't run on
+            /// the value that got overwritten by `item`), *and* will allow the `dequeue` operation
+            /// to create a copy of `item`, which could result in `T`'s destructor running on `item`
+            /// twice.
+            pub unsafe fn enqueue_unchecked(&mut self, item: T) {
+                let tail = self.rb.as_ref().tail.load_relaxed();
+                debug_assert_ne!(tail.wrapping_add(1), self.rb.as_ref().head.load_acquire());
+                self._enqueue(tail, item);
             }
 
             unsafe fn _enqueue(&mut self, tail: $uxx, item: T) {
                 let rb = self.rb.as_mut();
 
-                let n = rb.capacity() + 1;
+                let cap = rb.capacity();
                 let buffer = rb.buffer.get_mut();
 
-                let next_tail = (tail + 1) % n;
                 // NOTE(ptr::write) the memory slot that we are about to write to is
                 // uninitialized. We use `ptr::write` to avoid running `T`'s destructor on the
                 // uninitialized memory
-                ptr::write(buffer.get_unchecked_mut(usize::from(tail)), item);
-                rb.tail.store_release(next_tail);
+                ptr::write(buffer.get_unchecked_mut(usize::from(tail % cap)), item);
+                rb.tail.store_release(tail.wrapping_add(1));
             }
         }
     };
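A worked check of the two producer-side tests above (`head.wrapping_add(cap) != tail` in `ready`, `tail.wrapping_sub(head) > cap - 1` in `enqueue`), again with wrapped `u8` indices (hypothetical values):

```rust
fn main() {
    let cap: u8 = 4;
    let head: u8 = 254;                    // consumer index, about to wrap
    let tail: u8 = head.wrapping_add(cap); // 2: the queue holds 4 items

    // `ready()`: head + cap == tail means there is no free slot
    assert_eq!(head.wrapping_add(cap), tail);
    // `enqueue()`: length exceeds cap - 1, so the item comes back as Err
    assert!(tail.wrapping_sub(head) > cap - 1);
}
```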
@@ -203,11 +190,11 @@ impl_!(usize);
 #[cfg(test)]
 mod tests {
     use consts::*;
-    use RingBuffer;
+    use spsc::Queue;
 
     #[test]
     fn sanity() {
-        let mut rb: RingBuffer<i32, U2> = RingBuffer::new();
+        let mut rb: Queue<i32, U2> = Queue::new();
 
         let (mut p, mut c) = rb.split();
 
src/string.rs

@@ -17,9 +17,8 @@ impl<N> String<N>
 where
     N: ArrayLength<u8>,
 {
-
     #[inline]
-    const_fn!(
+    const_fn! {
         /// Constructs a new, empty `String` with a fixed capacity of `N`
         ///
         /// # Examples
@@ -35,7 +34,7 @@ where
         pub const fn new() -> Self {
             String { vec: Vec::new() }
         }
-    );
+    }
 
     /// Converts a vector of bytes into a `String`.
     ///
@@ -225,7 +224,8 @@ where
     pub fn push(&mut self, c: char) -> Result<(), ()> {
         match c.len_utf8() {
             1 => self.vec.push(c as u8).map_err(|_| {}),
-            _ => self.vec
+            _ => self
+                .vec
                 .extend_from_slice(c.encode_utf8(&mut [0; 4]).as_bytes()),
         }
     }
@@ -513,11 +513,7 @@ macro_rules! impl_eq {
 impl_eq! { String<N>, str }
 impl_eq! { String<N>, &'a str }
 
-impl<N> Eq for String<N>
-where
-    N: ArrayLength<u8>,
-{
-}
+impl<N> Eq for String<N> where N: ArrayLength<u8> {}
 
 #[cfg(test)]
 mod tests {
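For orientation, a small usage sketch of `String::push` exercising both arms of the `len_utf8` match above (stable, default features):

```rust
use heapless::consts::*;
use heapless::String;

fn main() {
    let mut s: String<U8> = String::new();
    s.push('a').unwrap();  // len_utf8() == 1: pushed as a single byte
    s.push('é').unwrap();  // multi-byte: encoded via `encode_utf8`, then extended
    assert_eq!(&*s, "aé"); // `String` derefs to `str`
}
```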
tests/tsan.rs

@@ -8,13 +8,13 @@ use std::thread;
 
 use generic_array::typenum::Unsigned;
 use heapless::consts::*;
-use heapless::RingBuffer;
+use heapless::spsc;
 use scoped_threadpool::Pool;
 
 #[test]
 fn once() {
-    static mut RB: Option<RingBuffer<i32, U4>> = None;
-    unsafe{ RB = Some(RingBuffer::new()) };
+    static mut RB: Option<spsc::Queue<i32, U4>> = None;
+    unsafe { RB = Some(spsc::Queue::new()) };
 
     let rb = unsafe { RB.as_mut().unwrap() };
 
@@ -35,8 +35,8 @@ fn once() {
 
 #[test]
 fn twice() {
-    static mut RB: Option<RingBuffer<i32, U4>> = None;
-    unsafe{ RB = Some(RingBuffer::new()) };
+    static mut RB: Option<spsc::Queue<i32, U4>> = None;
+    unsafe { RB = Some(spsc::Queue::new()) };
 
     let rb = unsafe { RB.as_mut().unwrap() };
 
@@ -58,7 +58,7 @@ fn twice() {
 
 #[test]
 fn scoped() {
-    let mut rb: RingBuffer<i32, U4> = RingBuffer::new();
+    let mut rb: spsc::Queue<i32, U4> = spsc::Queue::new();
 
     rb.enqueue(0).unwrap();
 
@@ -83,7 +83,7 @@ fn scoped() {
 fn contention() {
     type N = U1024;
 
-    let mut rb: RingBuffer<u8, N> = RingBuffer::new();
+    let mut rb: spsc::Queue<u8, N> = spsc::Queue::new();
 
     {
         let (mut p, mut c) = rb.split();
@@ -127,7 +127,7 @@ fn contention() {
 fn unchecked() {
     type N = U1024;
 
-    let mut rb: RingBuffer<u8, N> = RingBuffer::new();
+    let mut rb: spsc::Queue<u8, N> = spsc::Queue::new();
 
     for _ in 0..N::to_usize() / 2 {
         rb.enqueue(1).unwrap();
@@ -139,7 +139,9 @@ fn unchecked() {
     Pool::new(2).scoped(move |scope| {
         scope.execute(move || {
             for _ in 0..N::to_usize() / 2 {
-                p.enqueue_unchecked(2);
+                unsafe {
+                    p.enqueue_unchecked(2);
+                }
             }
         });
 
@@ -161,7 +163,7 @@ fn unchecked() {
 #[test]
 fn len_properly_wraps() {
     type N = U3;
-    let mut rb: RingBuffer<u8, N> = RingBuffer::new();
+    let mut rb: spsc::Queue<u8, N> = spsc::Queue::new();
 
     rb.enqueue(1).unwrap();
     assert_eq!(rb.len(), 1);
@@ -178,7 +180,7 @@ fn len_properly_wraps() {
 #[test]
 fn iterator_properly_wraps() {
     type N = U3;
-    let mut rb: RingBuffer<u8, N> = RingBuffer::new();
+    let mut rb: spsc::Queue<u8, N> = spsc::Queue::new();
 
     rb.enqueue(1).unwrap();
     rb.dequeue();