Merge pull request #198 from japaric/const_generics

Const generics port
This commit is contained in:
Emil Fresk 2021-04-22 19:07:27 +02:00 committed by GitHub
commit bd32ab8383
GPG Key ID: 4AEE18F83AFDEB23
29 changed files with 2066 additions and 1828 deletions

View File

@ -105,7 +105,7 @@ jobs:
toolchain:
- stable
- nightly
- 1.36.0
- 1.51.0
features:
- serde
buildtype:
@ -242,7 +242,7 @@ jobs:
- name: Install Rust
uses: actions-rs/toolchain@v1
with:
toolchain: 1.36.0
toolchain: 1.51.0
target: x86_64-unknown-linux-gnu
override: true

View File

@ -7,6 +7,19 @@ and this project adheres to [Semantic Versioning](http://semver.org/).
## [Unreleased]
### Changed
- [breaking-change] Converted all data structures to use the `const generics` MVP
- [breaking-change] `HistoryBuffer` now supports `const` construction and non-`Copy` data
- [breaking-change] `HistoryBuffer::as_slice` and others now only return initialized values
- [breaking-change] `MultiCore`/`SingleCore` is now removed from `spsc::Queue`
- [breaking-change] `spsc::Queue` is now `usize` only
- [breaking-change] `spsc::Queue` now sacrifices one element for correctness (see issue #207): a queue declared with capacity `N` holds at most `N - 1` elements, where the old implementation held `N` (see the migration sketch after this list)
- `Pool` and `MPMC` now work on `thumbv6m`
- [breaking-change] `String` has had its `utf8`-related methods removed, as these are available via `str`
- [breaking-change] Data structures no longer implement the `AsSlice` traits; they use `AsRef` and `AsMut` instead
- `IndexMap::new()` is now a `const-fn`
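
For downstream code the port is mostly mechanical: `heapless::consts::U*` type-level integers become plain integer literals, and for kinded containers the capacity moves to the last generic position (the const generics MVP stabilized in Rust 1.51 requires const parameters to come after type parameters). A minimal before/after sketch, assuming the post-port API shown in the diffs below:

```rust
use heapless::{
    binary_heap::{BinaryHeap, Max},
    spsc::Queue,
    Vec,
};

fn main() {
    // 0.6: `Vec<u8, consts::U4>` -- 0.7: the capacity is an integer literal.
    let mut v: Vec<u8, 4> = Vec::new();
    v.push(1).unwrap();

    // Kinded containers moved the capacity to the *last* parameter:
    // 0.6: `BinaryHeap<_, U8, Max>` -- 0.7: `BinaryHeap<_, Max, 8>`.
    let mut heap: BinaryHeap<i32, Max, 8> = BinaryHeap::new();
    heap.push(42).unwrap();

    // An `spsc::Queue` declared with capacity 4 now holds at most 3 items.
    let mut q: Queue<u8, 4> = Queue::new();
    q.enqueue(0).unwrap();
    q.enqueue(1).unwrap();
    q.enqueue(2).unwrap();
    assert!(q.enqueue(3).is_err());
}
```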
## [v0.6.1] - 2021-03-02
### Fixed

View File

@ -2,6 +2,7 @@
authors = [
"Jorge Aparicio <jorge@japaric.io>",
"Per Lindgren <per.lindgren@ltu.se>",
"Emil Fresk <emil.fresk@gmail.com>",
]
categories = [
"data-structures",
@ -31,10 +32,11 @@ __trybuild = []
[target.x86_64-unknown-linux-gnu.dev-dependencies]
scoped_threadpool = "0.1.8"
[target.thumbv6m-none-eabi.dependencies]
atomic-polyfill = "0.1.2"
[dependencies]
as-slice = "0.1.5"
generic-array = "0.14.4"
hash32 = "0.1.0"
hash32 = "0.2.1"
[dependencies.serde]
version = "1"

View File

@ -24,10 +24,7 @@ fn main() -> Result<(), Box<dyn Error>> {
// built-in targets with no atomic / CAS support as of nightly-2019-12-17
// see the `no-atomics.sh` / `no-cas.sh` script sitting next to this file
match &target[..] {
"thumbv6m-none-eabi"
| "msp430-none-elf"
| "riscv32i-unknown-none-elf"
| "riscv32imc-unknown-none-elf" => {}
"msp430-none-elf" | "riscv32i-unknown-none-elf" | "riscv32imc-unknown-none-elf" => {}
_ => {
println!("cargo:rustc-cfg=has_cas");

View File

@ -1,7 +1,7 @@
use heapless::{consts, spsc::Queue};
use heapless::spsc::Queue;
fn main() {
let mut q: Queue<u8, consts::U4> = Queue::new();
let mut q: Queue<u8, 4> = Queue::new();
let (_p, mut _c) = q.split();
q.enqueue(0).unwrap();

View File

@ -3,8 +3,8 @@
use core::marker::PhantomData;
use heapless::{
consts,
spsc::{Consumer, Producer, Queue},
HistoryBuffer, Vec,
};
type NotSend = PhantomData<*const ()>;
@ -16,8 +16,9 @@ where
}
fn main() {
is_send::<Consumer<NotSend, consts::U4>>();
is_send::<Producer<NotSend, consts::U4>>();
is_send::<Queue<NotSend, consts::U4>>();
is_send::<heapless::Vec<NotSend, consts::U4>>();
is_send::<Consumer<NotSend, 4>>();
is_send::<Producer<NotSend, 4>>();
is_send::<Queue<NotSend, 4>>();
is_send::<Vec<NotSend, 4>>();
is_send::<HistoryBuffer<NotSend, 4>>();
}

View File

@ -1,83 +1,89 @@
error[E0277]: `*const ()` cannot be sent between threads safely
--> $DIR/not-send.rs:19:5
|
19 | is_send::<Consumer<NotSend, consts::U4>>();
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ `*const ()` cannot be sent between threads safely
12 | fn is_send<T>()
| ------- required by a bound in this
13 | where
14 | T: Send,
| ---- required by this bound in `is_send`
...
19 | is_send::<Consumer<NotSend, 4>>();
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ `*const ()` cannot be sent between threads safely
|
= help: within `std::marker::PhantomData<*const ()>`, the trait `std::marker::Send` is not implemented for `*const ()`
= note: required because it appears within the type `std::marker::PhantomData<*const ()>`
= note: required because of the requirements on the impl of `std::marker::Send` for `heapless::spsc::split::Consumer<'_, std::marker::PhantomData<*const ()>, typenum::uint::UInt<typenum::uint::UInt<typenum::uint::UInt<typenum::uint::UTerm, typenum::bit::B1>, typenum::bit::B0>, typenum::bit::B0>>`
note: required by `is_send`
--> $DIR/not-send.rs:12:1
|
12 | / fn is_send<T>()
13 | | where
14 | | T: Send,
15 | | {
16 | | }
| |_^
= help: within `PhantomData<*const ()>`, the trait `Send` is not implemented for `*const ()`
= note: required because it appears within the type `PhantomData<*const ()>`
= note: required because of the requirements on the impl of `Send` for `Consumer<'_, PhantomData<*const ()>, 4_usize>`
error[E0277]: `*const ()` cannot be sent between threads safely
--> $DIR/not-send.rs:20:5
|
20 | is_send::<Producer<NotSend, consts::U4>>();
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ `*const ()` cannot be sent between threads safely
12 | fn is_send<T>()
| ------- required by a bound in this
13 | where
14 | T: Send,
| ---- required by this bound in `is_send`
...
20 | is_send::<Producer<NotSend, 4>>();
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ `*const ()` cannot be sent between threads safely
|
= help: within `std::marker::PhantomData<*const ()>`, the trait `std::marker::Send` is not implemented for `*const ()`
= note: required because it appears within the type `std::marker::PhantomData<*const ()>`
= note: required because of the requirements on the impl of `std::marker::Send` for `heapless::spsc::split::Producer<'_, std::marker::PhantomData<*const ()>, typenum::uint::UInt<typenum::uint::UInt<typenum::uint::UInt<typenum::uint::UTerm, typenum::bit::B1>, typenum::bit::B0>, typenum::bit::B0>>`
note: required by `is_send`
--> $DIR/not-send.rs:12:1
|
12 | / fn is_send<T>()
13 | | where
14 | | T: Send,
15 | | {
16 | | }
| |_^
= help: within `PhantomData<*const ()>`, the trait `Send` is not implemented for `*const ()`
= note: required because it appears within the type `PhantomData<*const ()>`
= note: required because of the requirements on the impl of `Send` for `Producer<'_, PhantomData<*const ()>, 4_usize>`
error[E0277]: `*const ()` cannot be sent between threads safely
--> $DIR/not-send.rs:21:5
|
21 | is_send::<Queue<NotSend, consts::U4>>();
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ `*const ()` cannot be sent between threads safely
12 | fn is_send<T>()
| ------- required by a bound in this
13 | where
14 | T: Send,
| ---- required by this bound in `is_send`
...
21 | is_send::<Queue<NotSend, 4>>();
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ `*const ()` cannot be sent between threads safely
|
= help: within `std::marker::PhantomData<*const ()>`, the trait `std::marker::Send` is not implemented for `*const ()`
= note: required because it appears within the type `std::marker::PhantomData<*const ()>`
= note: required because of the requirements on the impl of `std::marker::Send` for `generic_array::GenericArray<std::marker::PhantomData<*const ()>, typenum::uint::UInt<typenum::uint::UInt<typenum::uint::UInt<typenum::uint::UTerm, typenum::bit::B1>, typenum::bit::B0>, typenum::bit::B0>>`
= note: required because it appears within the type `std::mem::ManuallyDrop<generic_array::GenericArray<std::marker::PhantomData<*const ()>, typenum::uint::UInt<typenum::uint::UInt<typenum::uint::UInt<typenum::uint::UTerm, typenum::bit::B1>, typenum::bit::B0>, typenum::bit::B0>>>`
= note: required because it appears within the type `std::mem::MaybeUninit<generic_array::GenericArray<std::marker::PhantomData<*const ()>, typenum::uint::UInt<typenum::uint::UInt<typenum::uint::UInt<typenum::uint::UTerm, typenum::bit::B1>, typenum::bit::B0>, typenum::bit::B0>>>`
= note: required because it appears within the type `heapless::i::Queue<generic_array::GenericArray<std::marker::PhantomData<*const ()>, typenum::uint::UInt<typenum::uint::UInt<typenum::uint::UInt<typenum::uint::UTerm, typenum::bit::B1>, typenum::bit::B0>, typenum::bit::B0>>>`
= note: required because it appears within the type `heapless::spsc::Queue<std::marker::PhantomData<*const ()>, typenum::uint::UInt<typenum::uint::UInt<typenum::uint::UInt<typenum::uint::UTerm, typenum::bit::B1>, typenum::bit::B0>, typenum::bit::B0>>`
note: required by `is_send`
--> $DIR/not-send.rs:12:1
|
12 | / fn is_send<T>()
13 | | where
14 | | T: Send,
15 | | {
16 | | }
| |_^
= help: within `Queue<PhantomData<*const ()>, 4_usize>`, the trait `Send` is not implemented for `*const ()`
= note: required because it appears within the type `PhantomData<*const ()>`
= note: required because it appears within the type `ManuallyDrop<PhantomData<*const ()>>`
= note: required because it appears within the type `MaybeUninit<PhantomData<*const ()>>`
= note: required because it appears within the type `UnsafeCell<MaybeUninit<PhantomData<*const ()>>>`
= note: required because it appears within the type `[UnsafeCell<MaybeUninit<PhantomData<*const ()>>>; 4]`
= note: required because it appears within the type `Queue<PhantomData<*const ()>, 4_usize>`
error[E0277]: `*const ()` cannot be sent between threads safely
--> $DIR/not-send.rs:22:5
|
22 | is_send::<heapless::Vec<NotSend, consts::U4>>();
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ `*const ()` cannot be sent between threads safely
12 | fn is_send<T>()
| ------- required by a bound in this
13 | where
14 | T: Send,
| ---- required by this bound in `is_send`
...
22 | is_send::<Vec<NotSend, 4>>();
| ^^^^^^^^^^^^^^^^^^^^^^^^^^ `*const ()` cannot be sent between threads safely
|
= help: within `std::marker::PhantomData<*const ()>`, the trait `std::marker::Send` is not implemented for `*const ()`
= note: required because it appears within the type `std::marker::PhantomData<*const ()>`
= note: required because of the requirements on the impl of `std::marker::Send` for `generic_array::GenericArray<std::marker::PhantomData<*const ()>, typenum::uint::UInt<typenum::uint::UInt<typenum::uint::UInt<typenum::uint::UTerm, typenum::bit::B1>, typenum::bit::B0>, typenum::bit::B0>>`
= note: required because it appears within the type `std::mem::ManuallyDrop<generic_array::GenericArray<std::marker::PhantomData<*const ()>, typenum::uint::UInt<typenum::uint::UInt<typenum::uint::UInt<typenum::uint::UTerm, typenum::bit::B1>, typenum::bit::B0>, typenum::bit::B0>>>`
= note: required because it appears within the type `std::mem::MaybeUninit<generic_array::GenericArray<std::marker::PhantomData<*const ()>, typenum::uint::UInt<typenum::uint::UInt<typenum::uint::UInt<typenum::uint::UTerm, typenum::bit::B1>, typenum::bit::B0>, typenum::bit::B0>>>`
= note: required because it appears within the type `heapless::i::Vec<generic_array::GenericArray<std::marker::PhantomData<*const ()>, typenum::uint::UInt<typenum::uint::UInt<typenum::uint::UInt<typenum::uint::UTerm, typenum::bit::B1>, typenum::bit::B0>, typenum::bit::B0>>>`
= note: required because it appears within the type `heapless::vec::Vec<std::marker::PhantomData<*const ()>, typenum::uint::UInt<typenum::uint::UInt<typenum::uint::UInt<typenum::uint::UTerm, typenum::bit::B1>, typenum::bit::B0>, typenum::bit::B0>>`
note: required by `is_send`
--> $DIR/not-send.rs:12:1
= help: within `heapless::Vec<PhantomData<*const ()>, 4_usize>`, the trait `Send` is not implemented for `*const ()`
= note: required because it appears within the type `PhantomData<*const ()>`
= note: required because it appears within the type `[PhantomData<*const ()>; 4]`
= note: required because it appears within the type `ManuallyDrop<[PhantomData<*const ()>; 4]>`
= note: required because it appears within the type `MaybeUninit<[PhantomData<*const ()>; 4]>`
= note: required because it appears within the type `heapless::Vec<PhantomData<*const ()>, 4_usize>`
error[E0277]: `*const ()` cannot be sent between threads safely
--> $DIR/not-send.rs:23:5
|
12 | / fn is_send<T>()
13 | | where
14 | | T: Send,
15 | | {
16 | | }
| |_^
12 | fn is_send<T>()
| ------- required by a bound in this
13 | where
14 | T: Send,
| ---- required by this bound in `is_send`
...
23 | is_send::<HistoryBuffer<NotSend, 4>>();
| ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ `*const ()` cannot be sent between threads safely
|
= help: within `HistoryBuffer<PhantomData<*const ()>, 4_usize>`, the trait `Send` is not implemented for `*const ()`
= note: required because it appears within the type `PhantomData<*const ()>`
= note: required because it appears within the type `ManuallyDrop<PhantomData<*const ()>>`
= note: required because it appears within the type `MaybeUninit<PhantomData<*const ()>>`
= note: required because it appears within the type `[MaybeUninit<PhantomData<*const ()>>; 4]`
= note: required because it appears within the type `HistoryBuffer<PhantomData<*const ()>, 4_usize>`

View File

@ -16,9 +16,8 @@ use core::{
ptr, slice,
};
use generic_array::{ArrayLength, GenericArray};
use crate::sealed::binary_heap::Kind;
use crate::vec::Vec;
/// Min-heap
pub enum Min {}
@ -26,17 +25,6 @@ pub enum Min {}
/// Max-heap
pub enum Max {}
impl<A, K> crate::i::BinaryHeap<A, K> {
/// `BinaryHeap` `const` constructor; wrap the returned value in
/// [`BinaryHeap`](../struct.BinaryHeap.html)
pub const fn new() -> Self {
Self {
_kind: PhantomData,
data: crate::i::Vec::new(),
}
}
}
/// A priority queue implemented with a binary heap.
///
/// This can be either a min-heap or a max-heap.
@ -47,9 +35,8 @@ impl<A, K> crate::i::BinaryHeap<A, K> {
///
/// ```
/// use heapless::binary_heap::{BinaryHeap, Max};
/// use heapless::consts::*;
///
/// let mut heap: BinaryHeap<_, U8, Max> = BinaryHeap::new();
/// let mut heap: BinaryHeap<_, Max, 8> = BinaryHeap::new();
///
/// // We can use peek to look at the next item in the heap. In this case,
/// // there's no items in there yet so we get None.
@ -84,51 +71,51 @@ impl<A, K> crate::i::BinaryHeap<A, K> {
/// // The heap should now be empty.
/// assert!(heap.is_empty())
/// ```
pub struct BinaryHeap<T, N, KIND>(
#[doc(hidden)] pub crate::i::BinaryHeap<GenericArray<T, N>, KIND>,
)
where
T: Ord,
N: ArrayLength<T>,
KIND: Kind;
impl<T, N, K> BinaryHeap<T, N, K>
where
T: Ord,
N: ArrayLength<T>,
K: Kind,
{
pub struct BinaryHeap<T, K, const N: usize> {
pub(crate) _kind: PhantomData<K>,
pub(crate) data: Vec<T, N>,
}
impl<T, K, const N: usize> BinaryHeap<T, K, N> {
/* Constructors */
/// Creates an empty BinaryHeap as a `K`-heap.
///
/// ```
/// use heapless::binary_heap::{BinaryHeap, Max};
/// use heapless::consts::*;
///
/// // allocate the binary heap on the stack
/// let mut heap: BinaryHeap<_, U8, Max> = BinaryHeap::new();
/// let mut heap: BinaryHeap<_, Max, 8> = BinaryHeap::new();
/// heap.push(4).unwrap();
///
/// // allocate the binary heap in a static variable
/// static mut HEAP: BinaryHeap<i32, U8, Max> = BinaryHeap(heapless::i::BinaryHeap::new());
/// static mut HEAP: BinaryHeap<i32, Max, 8> = BinaryHeap::new();
/// ```
pub fn new() -> Self {
BinaryHeap(crate::i::BinaryHeap::new())
pub const fn new() -> Self {
Self {
_kind: PhantomData,
data: Vec::new(),
}
}
}
impl<T, K, const N: usize> BinaryHeap<T, K, N>
where
T: Ord,
K: Kind,
{
/* Public API */
/// Returns the capacity of the binary heap.
pub fn capacity(&self) -> usize {
self.0.data.capacity()
self.data.capacity()
}
/// Drops all items from the binary heap.
///
/// ```
/// use heapless::binary_heap::{BinaryHeap, Max};
/// use heapless::consts::*;
///
/// let mut heap: BinaryHeap<_, U8, Max> = BinaryHeap::new();
/// let mut heap: BinaryHeap<_, Max, 8> = BinaryHeap::new();
/// heap.push(1).unwrap();
/// heap.push(3).unwrap();
///
@ -139,32 +126,30 @@ where
/// assert!(heap.is_empty());
/// ```
pub fn clear(&mut self) {
self.0.data.clear()
self.data.clear()
}
/// Returns the length of the binary heap.
///
/// ```
/// use heapless::binary_heap::{BinaryHeap, Max};
/// use heapless::consts::*;
///
/// let mut heap: BinaryHeap<_, U8, Max> = BinaryHeap::new();
/// let mut heap: BinaryHeap<_, Max, 8> = BinaryHeap::new();
/// heap.push(1).unwrap();
/// heap.push(3).unwrap();
///
/// assert_eq!(heap.len(), 2);
/// ```
pub fn len(&self) -> usize {
self.0.data.len
self.data.len()
}
/// Checks if the binary heap is empty.
///
/// ```
/// use heapless::binary_heap::{BinaryHeap, Max};
/// use heapless::consts::*;
///
/// let mut heap: BinaryHeap<_, U8, Max> = BinaryHeap::new();
/// let mut heap: BinaryHeap<_, Max, 8> = BinaryHeap::new();
///
/// assert!(heap.is_empty());
///
@ -182,9 +167,8 @@ where
///
/// ```
/// use heapless::binary_heap::{BinaryHeap, Max};
/// use heapless::consts::*;
///
/// let mut heap: BinaryHeap<_, U8, Max> = BinaryHeap::new();
/// let mut heap: BinaryHeap<_, Max, 8> = BinaryHeap::new();
/// heap.push(1).unwrap();
/// heap.push(2).unwrap();
/// heap.push(3).unwrap();
@ -197,7 +181,7 @@ where
/// }
/// ```
pub fn iter(&self) -> slice::Iter<'_, T> {
self.0.data.as_slice().iter()
self.data.as_slice().iter()
}
/// Returns a mutable iterator visiting all values in the underlying vector, in arbitrary order.
@ -205,7 +189,7 @@ where
/// **WARNING** Mutating the items in the binary heap can leave the heap in an inconsistent
/// state.
pub fn iter_mut(&mut self) -> slice::IterMut<'_, T> {
self.0.data.as_mut_slice().iter_mut()
self.data.as_mut_slice().iter_mut()
}
/// Returns the *top* (greatest if max-heap, smallest if min-heap) item in the binary heap, or
@ -213,9 +197,8 @@ where
///
/// ```
/// use heapless::binary_heap::{BinaryHeap, Max};
/// use heapless::consts::*;
///
/// let mut heap: BinaryHeap<_, U8, Max> = BinaryHeap::new();
/// let mut heap: BinaryHeap<_, Max, 8> = BinaryHeap::new();
/// assert_eq!(heap.peek(), None);
///
/// heap.push(1).unwrap();
@ -224,7 +207,7 @@ where
/// assert_eq!(heap.peek(), Some(&5));
/// ```
pub fn peek(&self) -> Option<&T> {
self.0.data.as_slice().get(0)
self.data.as_slice().get(0)
}
/// Returns a mutable reference to the greatest item in the binary heap, or
@ -239,9 +222,8 @@ where
///
/// ```
/// use heapless::binary_heap::{BinaryHeap, Max};
/// use heapless::consts::*;
///
/// let mut heap: BinaryHeap<_, U8, Max> = BinaryHeap::new();
/// let mut heap: BinaryHeap<_, Max, 8> = BinaryHeap::new();
/// assert!(heap.peek_mut().is_none());
///
/// heap.push(1);
@ -254,7 +236,7 @@ where
///
/// assert_eq!(heap.peek(), Some(&2));
/// ```
pub fn peek_mut(&mut self) -> Option<PeekMut<'_, T, N, K>> {
pub fn peek_mut(&mut self) -> Option<PeekMut<'_, T, K, N>> {
if self.is_empty() {
None
} else {
@ -270,9 +252,8 @@ where
///
/// ```
/// use heapless::binary_heap::{BinaryHeap, Max};
/// use heapless::consts::*;
///
/// let mut heap: BinaryHeap<_, U8, Max> = BinaryHeap::new();
/// let mut heap: BinaryHeap<_, Max, 8> = BinaryHeap::new();
/// heap.push(1).unwrap();
/// heap.push(3).unwrap();
///
@ -291,10 +272,10 @@ where
/// Removes the *top* (greatest if max-heap, smallest if min-heap) item from the binary heap and
/// returns it, without checking if the binary heap is empty.
pub unsafe fn pop_unchecked(&mut self) -> T {
let mut item = self.0.data.pop_unchecked();
let mut item = self.data.pop_unchecked();
if !self.is_empty() {
mem::swap(&mut item, self.0.data.as_mut_slice().get_unchecked_mut(0));
mem::swap(&mut item, self.data.as_mut_slice().get_unchecked_mut(0));
self.sift_down_to_bottom(0);
}
item
@ -304,9 +285,8 @@ where
///
/// ```
/// use heapless::binary_heap::{BinaryHeap, Max};
/// use heapless::consts::*;
///
/// let mut heap: BinaryHeap<_, U8, Max> = BinaryHeap::new();
/// let mut heap: BinaryHeap<_, Max, 8> = BinaryHeap::new();
/// heap.push(3).unwrap();
/// heap.push(5).unwrap();
/// heap.push(1).unwrap();
@ -315,7 +295,7 @@ where
/// assert_eq!(heap.peek(), Some(&5));
/// ```
pub fn push(&mut self, item: T) -> Result<(), T> {
if self.0.data.is_full() {
if self.data.is_full() {
return Err(item);
}
@ -326,7 +306,7 @@ where
/// Pushes an item onto the binary heap without first checking if it's full.
pub unsafe fn push_unchecked(&mut self, item: T) {
let old_len = self.len();
self.0.data.push_unchecked(item);
self.data.push_unchecked(item);
self.sift_up(0, old_len);
}
@ -335,7 +315,7 @@ where
let end = self.len();
let start = pos;
unsafe {
let mut hole = Hole::new(self.0.data.as_mut_slice(), pos);
let mut hole = Hole::new(self.data.as_mut_slice(), pos);
let mut child = 2 * pos + 1;
while child < end {
let right = child + 1;
@ -354,7 +334,7 @@ where
fn sift_up(&mut self, start: usize, pos: usize) -> usize {
unsafe {
// Take out the value at `pos` and create a hole.
let mut hole = Hole::new(self.0.data.as_mut_slice(), pos);
let mut hole = Hole::new(self.data.as_mut_slice(), pos);
while hole.pos() > start {
let parent = (hole.pos() - 1) / 2;
@ -437,20 +417,18 @@ impl<'a, T> Hole<'a, T> {
///
/// [`peek_mut`]: struct.BinaryHeap.html#method.peek_mut
/// [`BinaryHeap`]: struct.BinaryHeap.html
pub struct PeekMut<'a, T, N, K>
pub struct PeekMut<'a, T, K, const N: usize>
where
T: Ord,
N: ArrayLength<T>,
K: Kind,
{
heap: &'a mut BinaryHeap<T, N, K>,
heap: &'a mut BinaryHeap<T, K, N>,
sift: bool,
}
impl<T, N, K> Drop for PeekMut<'_, T, N, K>
impl<T, K, const N: usize> Drop for PeekMut<'_, T, K, N>
where
T: Ord,
N: ArrayLength<T>,
K: Kind,
{
fn drop(&mut self) {
@ -460,41 +438,38 @@ where
}
}
impl<T, N, K> Deref for PeekMut<'_, T, N, K>
impl<T, K, const N: usize> Deref for PeekMut<'_, T, K, N>
where
T: Ord,
N: ArrayLength<T>,
K: Kind,
{
type Target = T;
fn deref(&self) -> &T {
debug_assert!(!self.heap.is_empty());
// SAFE: PeekMut is only instantiated for non-empty heaps
unsafe { self.heap.0.data.as_slice().get_unchecked(0) }
unsafe { self.heap.data.as_slice().get_unchecked(0) }
}
}
impl<T, N, K> DerefMut for PeekMut<'_, T, N, K>
impl<T, K, const N: usize> DerefMut for PeekMut<'_, T, K, N>
where
T: Ord,
N: ArrayLength<T>,
K: Kind,
{
fn deref_mut(&mut self) -> &mut T {
debug_assert!(!self.heap.is_empty());
// SAFE: PeekMut is only instantiated for non-empty heaps
unsafe { self.heap.0.data.as_mut_slice().get_unchecked_mut(0) }
unsafe { self.heap.data.as_mut_slice().get_unchecked_mut(0) }
}
}
impl<'a, T, N, K> PeekMut<'a, T, N, K>
impl<'a, T, K, const N: usize> PeekMut<'a, T, K, N>
where
T: Ord,
N: ArrayLength<T>,
K: Kind,
{
/// Removes the peeked value from the heap and returns it.
pub fn pop(mut this: PeekMut<'a, T, N, K>) -> T {
pub fn pop(mut this: PeekMut<'a, T, K, N>) -> T {
let value = this.heap.pop().unwrap();
this.sift = false;
value
@ -512,10 +487,9 @@ impl<'a, T> Drop for Hole<'a, T> {
}
}
impl<T, N, K> Default for BinaryHeap<T, N, K>
impl<T, K, const N: usize> Default for BinaryHeap<T, K, N>
where
T: Ord,
N: ArrayLength<T>,
K: Kind,
{
fn default() -> Self {
@ -523,34 +497,27 @@ where
}
}
impl<T, N, K> Clone for BinaryHeap<T, N, K>
impl<T, K, const N: usize> Clone for BinaryHeap<T, K, N>
where
N: ArrayLength<T>,
K: Kind,
T: Ord + Clone,
{
fn clone(&self) -> Self {
BinaryHeap(crate::i::BinaryHeap {
_kind: self.0._kind,
data: self.0.data.clone(),
})
Self {
_kind: self._kind,
data: self.data.clone(),
}
}
}
impl<T, N, K> Drop for BinaryHeap<T, N, K>
where
N: ArrayLength<T>,
K: Kind,
T: Ord,
{
impl<T, K, const N: usize> Drop for BinaryHeap<T, K, N> {
fn drop(&mut self) {
unsafe { ptr::drop_in_place(self.0.data.as_mut_slice()) }
unsafe { ptr::drop_in_place(self.data.as_mut_slice()) }
}
}
impl<T, N, K> fmt::Debug for BinaryHeap<T, N, K>
impl<T, K, const N: usize> fmt::Debug for BinaryHeap<T, K, N>
where
N: ArrayLength<T>,
K: Kind,
T: Ord + fmt::Debug,
{
@ -559,9 +526,8 @@ where
}
}
impl<'a, T, N, K> IntoIterator for &'a BinaryHeap<T, N, K>
impl<'a, T, K, const N: usize> IntoIterator for &'a BinaryHeap<T, K, N>
where
N: ArrayLength<T>,
K: Kind,
T: Ord,
{
@ -577,19 +543,16 @@ where
mod tests {
use std::vec::Vec;
use crate::{
binary_heap::{self, BinaryHeap, Min},
consts::*,
};
use crate::binary_heap::{BinaryHeap, Max, Min};
#[test]
fn static_new() {
static mut _B: BinaryHeap<i32, U16, Min> = BinaryHeap(crate::i::BinaryHeap::new());
static mut _B: BinaryHeap<i32, Min, 16> = BinaryHeap::new();
}
#[test]
fn min() {
let mut heap = BinaryHeap::<_, U16, Min>::new();
let mut heap = BinaryHeap::<_, Min, 16>::new();
heap.push(1).unwrap();
heap.push(2).unwrap();
heap.push(3).unwrap();
@ -641,7 +604,7 @@ mod tests {
#[test]
fn max() {
let mut heap = BinaryHeap::<_, U16, binary_heap::Max>::new();
let mut heap = BinaryHeap::<_, Max, 16>::new();
heap.push(1).unwrap();
heap.push(2).unwrap();
heap.push(3).unwrap();
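
As a usage sketch of the reshuffled generics (not part of the diff), `PeekMut` keeps its old behavior: mutating the top element re-sifts the heap when the guard is dropped, and `PeekMut::pop` removes it:

```rust
use heapless::binary_heap::{BinaryHeap, Max, PeekMut};

fn main() {
    let mut heap: BinaryHeap<i32, Max, 8> = BinaryHeap::new();
    heap.push(1).unwrap();
    heap.push(5).unwrap();
    heap.push(3).unwrap();

    // Overwrite the current top (5); the heap re-sifts when `top` drops.
    if let Some(mut top) = heap.peek_mut() {
        *top = 0;
    }
    assert_eq!(heap.peek(), Some(&3));

    // `PeekMut::pop` removes the peeked element and returns it.
    if let Some(top) = heap.peek_mut() {
        assert_eq!(PeekMut::pop(top), 3);
    }
    assert_eq!(heap.len(), 2);
}
```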

View File

@ -1,36 +1,31 @@
use crate::{
sealed::binary_heap::Kind as BinaryHeapKind, BinaryHeap, IndexMap, IndexSet, LinearMap, String,
Vec,
};
use core::{fmt, marker::PhantomData};
use generic_array::{typenum::PowerOfTwo, ArrayLength};
use hash32::{BuildHasherDefault, Hash, Hasher};
use serde::de::{self, Deserialize, Deserializer, Error, MapAccess, SeqAccess};
use crate::{
indexmap::{Bucket, Pos},
sealed::binary_heap::Kind as BinaryHeapKind,
BinaryHeap, IndexMap, IndexSet, LinearMap, String, Vec,
};
// Sequential containers
impl<'de, T, N, KIND> Deserialize<'de> for BinaryHeap<T, N, KIND>
impl<'de, T, KIND, const N: usize> Deserialize<'de> for BinaryHeap<T, KIND, N>
where
T: Ord + Deserialize<'de>,
N: ArrayLength<T>,
KIND: BinaryHeapKind,
{
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
struct ValueVisitor<'de, T, N, KIND>(PhantomData<(&'de (), T, N, KIND)>);
struct ValueVisitor<'de, T, KIND, const N: usize>(PhantomData<(&'de (), T, KIND)>);
impl<'de, T, N, KIND> de::Visitor<'de> for ValueVisitor<'de, T, N, KIND>
impl<'de, T, KIND, const N: usize> de::Visitor<'de> for ValueVisitor<'de, T, KIND, N>
where
T: Ord + Deserialize<'de>,
N: ArrayLength<T>,
KIND: BinaryHeapKind,
{
type Value = BinaryHeap<T, N, KIND>;
type Value = BinaryHeap<T, KIND, N>;
fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
formatter.write_str("a sequence")
@ -55,25 +50,23 @@ where
}
}
impl<'de, T, N, S> Deserialize<'de> for IndexSet<T, N, BuildHasherDefault<S>>
impl<'de, T, S, const N: usize> Deserialize<'de> for IndexSet<T, BuildHasherDefault<S>, N>
where
T: Eq + Hash + Deserialize<'de>,
S: Hasher + Default,
N: ArrayLength<Bucket<T, ()>> + ArrayLength<Option<Pos>> + PowerOfTwo,
{
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
struct ValueVisitor<'de, T, N, S>(PhantomData<(&'de (), T, N, S)>);
struct ValueVisitor<'de, T, S, const N: usize>(PhantomData<(&'de (), T, S)>);
impl<'de, T, N, S> de::Visitor<'de> for ValueVisitor<'de, T, N, S>
impl<'de, T, S, const N: usize> de::Visitor<'de> for ValueVisitor<'de, T, S, N>
where
T: Eq + Hash + Deserialize<'de>,
S: Hasher + Default,
N: ArrayLength<Bucket<T, ()>> + ArrayLength<Option<Pos>> + PowerOfTwo,
{
type Value = IndexSet<T, N, BuildHasherDefault<S>>;
type Value = IndexSet<T, BuildHasherDefault<S>, N>;
fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
formatter.write_str("a sequence")
@ -98,20 +91,18 @@ where
}
}
impl<'de, T, N> Deserialize<'de> for Vec<T, N>
impl<'de, T, const N: usize> Deserialize<'de> for Vec<T, N>
where
N: ArrayLength<T>,
T: Deserialize<'de>,
{
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
struct ValueVisitor<'de, T, N>(PhantomData<(&'de (), T, N)>);
struct ValueVisitor<'de, T, const N: usize>(PhantomData<(&'de (), T)>);
impl<'de, T, N> de::Visitor<'de> for ValueVisitor<'de, T, N>
impl<'de, T, const N: usize> serde::de::Visitor<'de> for ValueVisitor<'de, T, N>
where
N: ArrayLength<T>,
T: Deserialize<'de>,
{
type Value = Vec<T, N>;
@ -141,27 +132,25 @@ where
// Dictionaries
impl<'de, K, V, N, S> Deserialize<'de> for IndexMap<K, V, N, BuildHasherDefault<S>>
impl<'de, K, V, S, const N: usize> Deserialize<'de> for IndexMap<K, V, BuildHasherDefault<S>, N>
where
K: Eq + Hash + Deserialize<'de>,
V: Deserialize<'de>,
N: ArrayLength<Bucket<K, V>> + ArrayLength<Option<Pos>> + PowerOfTwo,
S: Default + Hasher,
{
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
struct ValueVisitor<'de, K, V, N, S>(PhantomData<(&'de (), K, V, N, S)>);
struct ValueVisitor<'de, K, V, S, const N: usize>(PhantomData<(&'de (), K, V, S)>);
impl<'de, K, V, N, S> de::Visitor<'de> for ValueVisitor<'de, K, V, N, S>
impl<'de, K, V, S, const N: usize> de::Visitor<'de> for ValueVisitor<'de, K, V, S, N>
where
K: Eq + Hash + Deserialize<'de>,
V: Deserialize<'de>,
N: ArrayLength<Bucket<K, V>> + ArrayLength<Option<Pos>> + PowerOfTwo,
S: Default + Hasher,
{
type Value = IndexMap<K, V, N, BuildHasherDefault<S>>;
type Value = IndexMap<K, V, BuildHasherDefault<S>, N>;
fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
formatter.write_str("a map")
@ -186,23 +175,21 @@ where
}
}
impl<'de, K, V, N> Deserialize<'de> for LinearMap<K, V, N>
impl<'de, K, V, const N: usize> Deserialize<'de> for LinearMap<K, V, N>
where
K: Eq + Deserialize<'de>,
V: Deserialize<'de>,
N: ArrayLength<(K, V)>,
{
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
struct ValueVisitor<'de, K, V, N>(PhantomData<(&'de (), K, V, N)>);
struct ValueVisitor<'de, K, V, const N: usize>(PhantomData<(&'de (), K, V)>);
impl<'de, K, V, N> de::Visitor<'de> for ValueVisitor<'de, K, V, N>
impl<'de, K, V, const N: usize> de::Visitor<'de> for ValueVisitor<'de, K, V, N>
where
K: Eq + Deserialize<'de>,
V: Deserialize<'de>,
N: ArrayLength<(K, V)>,
{
type Value = LinearMap<K, V, N>;
@ -231,28 +218,18 @@ where
// String containers
impl<'de, N> Deserialize<'de> for String<N>
where
N: ArrayLength<u8>,
{
impl<'de, const N: usize> Deserialize<'de> for String<N> {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
struct ValueVisitor<'de, N>(PhantomData<(&'de (), N)>);
struct ValueVisitor<'de, const N: usize>(PhantomData<&'de ()>);
impl<'de, N> de::Visitor<'de> for ValueVisitor<'de, N>
where
N: ArrayLength<u8>,
{
impl<'de, const N: usize> de::Visitor<'de> for ValueVisitor<'de, N> {
type Value = String<N>;
fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(
formatter,
"a string no more than {} bytes long",
N::to_u64()
)
write!(formatter, "a string no more than {} bytes long", N as u64)
}
fn visit_str<E>(self, v: &str) -> Result<Self::Value, E>
@ -269,13 +246,15 @@ where
where
E: de::Error,
{
let mut bytes = Vec::new();
if bytes.extend_from_slice(v).is_err() {
return Err(E::invalid_value(de::Unexpected::Bytes(v), &self));
}
let mut s = String::new();
String::from_utf8(bytes)
.map_err(|_| E::invalid_value(de::Unexpected::Bytes(v), &self))
s.push_str(
core::str::from_utf8(v)
.map_err(|_| E::invalid_value(de::Unexpected::Bytes(v), &self))?,
)
.map_err(|_| E::invalid_length(v.len(), &self))?;
Ok(s)
}
}
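
As an illustrative usage sketch (not part of the diff; `serde_json` stands in here for any deserializer), the rewritten visitors keep the external behavior of bounding input by the const-generic capacity:

```rust
use heapless::Vec;

fn main() {
    // Fits within the capacity of 4.
    let v: Vec<u8, 4> = serde_json::from_str("[1, 2, 3]").unwrap();
    assert_eq!(&v[..], &[1, 2, 3]);

    // Exceeding the capacity is a deserialization error, not an allocation.
    assert!(serde_json::from_str::<Vec<u8, 4>>("[1, 2, 3, 4, 5]").is_err());
}
```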

View File

@ -1,4 +1,6 @@
use generic_array::{sequence::GenericSequence, ArrayLength, GenericArray};
use core::mem::MaybeUninit;
use core::ptr;
use core::slice;
/// A "history buffer", similar to a write-only ring buffer of fixed length.
///
@ -6,67 +8,58 @@ use generic_array::{sequence::GenericSequence, ArrayLength, GenericArray};
/// is overwritten. Thus, the buffer is useful to keep a history of values with
/// some desired depth, and for example calculate a rolling average.
///
/// The buffer is always fully initialized; depending on the constructor, the
/// initial value is either the default value for the element type or a supplied
/// initial value. This simplifies the API and is mostly irrelevant for the
/// intended use case.
///
/// # Examples
/// ```
/// use heapless::HistoryBuffer;
/// use heapless::consts::*;
///
/// // Initialize a new buffer with 8 elements, all initially zero.
/// let mut buf = HistoryBuffer::<_, U8>::new();
/// // Initialize a new buffer with 8 elements.
/// let mut buf = HistoryBuffer::<_, 8>::new();
///
/// // Starts with no data
/// assert_eq!(buf.recent(), None);
///
/// buf.write(3);
/// buf.write(5);
/// buf.extend(&[4, 4]);
///
/// // The most recently written element is a four.
/// assert_eq!(buf.recent(), &4);
/// assert_eq!(buf.recent(), Some(&4));
///
/// // To access all elements in an unspecified order, use `as_slice()`.
/// for el in buf.as_slice() { println!("{:?}", el); }
///
/// // Now we can prepare an average of all values, which comes out to 2.
/// // Now we can prepare an average of all values, which comes out to 4.
/// let avg = buf.as_slice().iter().sum::<usize>() / buf.len();
/// assert_eq!(avg, 2);
/// assert_eq!(avg, 4);
/// ```
#[derive(Clone)]
pub struct HistoryBuffer<T, N>
where
N: ArrayLength<T>,
{
data: GenericArray<T, N>,
pub struct HistoryBuffer<T, const N: usize> {
data: [MaybeUninit<T>; N],
write_at: usize,
filled: bool,
}
impl<T, N> HistoryBuffer<T, N>
where
N: ArrayLength<T>,
T: Default,
{
/// Constructs a new history buffer, where every element is filled with the
/// default value of the type `T`.
impl<T, const N: usize> HistoryBuffer<T, N> {
const INIT: MaybeUninit<T> = MaybeUninit::uninit();
/// Constructs a new history buffer.
///
/// `HistoryBuffer` currently cannot be constructed in `const` context.
/// A `HistoryBuffer` can now be constructed in `const` contexts.
///
/// # Examples
///
/// ```
/// use heapless::HistoryBuffer;
/// use heapless::consts::*;
///
/// // Allocate a 16-element buffer on the stack
/// let mut x: HistoryBuffer<u8, U16> = HistoryBuffer::new();
/// // All elements are zero
/// assert_eq!(x.as_slice(), [0; 16]);
/// let x: HistoryBuffer<u8, 16> = HistoryBuffer::new();
/// assert_eq!(x.len(), 0);
/// ```
pub fn new() -> Self {
#[inline]
pub const fn new() -> Self {
Self {
data: Default::default(),
data: [Self::INIT; N],
write_at: 0,
filled: false,
}
}
@ -77,10 +70,9 @@ where
}
}
impl<T, N> HistoryBuffer<T, N>
impl<T, const N: usize> HistoryBuffer<T, N>
where
N: ArrayLength<T>,
T: Clone,
T: Copy + Clone,
{
/// Constructs a new history buffer, where every element is the given value.
///
@ -88,17 +80,18 @@ where
///
/// ```
/// use heapless::HistoryBuffer;
/// use heapless::consts::*;
///
/// // Allocate a 16-element buffer on the stack
/// let mut x: HistoryBuffer<u8, U16> = HistoryBuffer::new_with(4);
/// let mut x: HistoryBuffer<u8, 16> = HistoryBuffer::new_with(4);
/// // All elements are four
/// assert_eq!(x.as_slice(), [4; 16]);
/// ```
#[inline]
pub fn new_with(t: T) -> Self {
Self {
data: GenericArray::generate(|_| t.clone()),
data: [MaybeUninit::new(t); N],
write_at: 0,
filled: true,
}
}
@ -108,22 +101,36 @@ where
}
}
impl<T, N> HistoryBuffer<T, N>
where
N: ArrayLength<T>,
{
impl<T, const N: usize> HistoryBuffer<T, N> {
/// Returns the current fill level of the buffer.
#[inline]
pub fn len(&self) -> usize {
if self.filled {
N
} else {
self.write_at
}
}
/// Returns the capacity of the buffer, which is the length of the
/// underlying backing array.
pub fn len(&self) -> usize {
self.data.len()
#[inline]
pub fn capacity(&self) -> usize {
N
}
/// Writes an element to the buffer, overwriting the oldest value.
pub fn write(&mut self, t: T) {
self.data[self.write_at] = t;
if self.filled {
// Drop the old before we overwrite it.
unsafe { ptr::drop_in_place(self.data[self.write_at].as_mut_ptr()) }
}
self.data[self.write_at] = MaybeUninit::new(t);
self.write_at += 1;
if self.write_at == self.len() {
if self.write_at == self.capacity() {
self.write_at = 0;
self.filled = true;
}
}
@ -146,32 +153,32 @@ where
///
/// ```
/// use heapless::HistoryBuffer;
/// use heapless::consts::*;
///
/// let mut x: HistoryBuffer<u8, U16> = HistoryBuffer::new();
/// let mut x: HistoryBuffer<u8, 16> = HistoryBuffer::new();
/// x.write(4);
/// x.write(10);
/// assert_eq!(x.recent(), &10);
/// assert_eq!(x.recent(), Some(&10));
/// ```
pub fn recent(&self) -> &T {
pub fn recent(&self) -> Option<&T> {
if self.write_at == 0 {
&self.data[self.len() - 1]
if self.filled {
Some(unsafe { &*self.data[self.capacity() - 1].as_ptr() })
} else {
None
}
} else {
&self.data[self.write_at - 1]
Some(unsafe { &*self.data[self.write_at - 1].as_ptr() })
}
}
/// Returns the array slice backing the buffer, without keeping track
/// of the write position. Therefore, the element order is unspecified.
pub fn as_slice(&self) -> &[T] {
&self.data
unsafe { slice::from_raw_parts(self.data.as_ptr() as *const _, self.len()) }
}
}
impl<T, N> Extend<T> for HistoryBuffer<T, N>
where
N: ArrayLength<T>,
{
impl<T, const N: usize> Extend<T> for HistoryBuffer<T, N> {
fn extend<I>(&mut self, iter: I)
where
I: IntoIterator<Item = T>,
@ -182,10 +189,9 @@ where
}
}
impl<'a, T, N> Extend<&'a T> for HistoryBuffer<T, N>
impl<'a, T, const N: usize> Extend<&'a T> for HistoryBuffer<T, N>
where
T: 'a + Clone,
N: ArrayLength<T>,
{
fn extend<I>(&mut self, iter: I)
where
@ -195,26 +201,37 @@ where
}
}
impl<T, const N: usize> Drop for HistoryBuffer<T, N> {
fn drop(&mut self) {
unsafe {
ptr::drop_in_place(ptr::slice_from_raw_parts_mut(
self.data.as_mut_ptr() as *mut T,
self.len(),
))
}
}
}
#[cfg(test)]
mod tests {
use crate::{consts::*, HistoryBuffer};
use crate::HistoryBuffer;
#[test]
fn new() {
let x: HistoryBuffer<u8, U4> = HistoryBuffer::new_with(1);
let x: HistoryBuffer<u8, 4> = HistoryBuffer::new_with(1);
assert_eq!(x.len(), 4);
assert_eq!(x.as_slice(), [1; 4]);
let x: HistoryBuffer<u8, U4> = HistoryBuffer::new();
assert_eq!(x.as_slice(), [0; 4]);
let x: HistoryBuffer<u8, 4> = HistoryBuffer::new();
assert_eq!(x.as_slice(), []);
}
#[test]
fn write() {
let mut x: HistoryBuffer<u8, U4> = HistoryBuffer::new();
let mut x: HistoryBuffer<u8, 4> = HistoryBuffer::new();
x.write(1);
x.write(4);
assert_eq!(x.as_slice(), [1, 4, 0, 0]);
assert_eq!(x.as_slice(), [1, 4]);
x.write(5);
x.write(6);
@ -227,33 +244,35 @@ mod tests {
#[test]
fn clear() {
let mut x: HistoryBuffer<u8, U4> = HistoryBuffer::new_with(1);
let mut x: HistoryBuffer<u8, 4> = HistoryBuffer::new_with(1);
x.clear();
assert_eq!(x.as_slice(), [0; 4]);
assert_eq!(x.as_slice(), []);
let mut x: HistoryBuffer<u8, U4> = HistoryBuffer::new();
let mut x: HistoryBuffer<u8, 4> = HistoryBuffer::new();
x.clear_with(1);
assert_eq!(x.as_slice(), [1; 4]);
}
#[test]
fn recent() {
let mut x: HistoryBuffer<u8, U4> = HistoryBuffer::new();
assert_eq!(x.recent(), &0);
let mut x: HistoryBuffer<u8, 4> = HistoryBuffer::new();
assert_eq!(x.recent(), None);
x.write(1);
x.write(4);
assert_eq!(x.recent(), &4);
assert_eq!(x.recent(), Some(&4));
x.write(5);
x.write(6);
x.write(10);
assert_eq!(x.recent(), &10);
assert_eq!(x.recent(), Some(&10));
}
#[test]
fn as_slice() {
let mut x: HistoryBuffer<u8, U4> = HistoryBuffer::new();
let mut x: HistoryBuffer<u8, 4> = HistoryBuffer::new();
assert_eq!(x.as_slice(), []);
x.extend([1, 2, 3, 4, 5].iter());
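
Taken together, the `MaybeUninit` storage plus the new `Drop` impl let the buffer track its fill level, hold non-`Copy` element types, and be constructed in `const` context. A small sketch of the new semantics, using only APIs visible in this diff:

```rust
use heapless::HistoryBuffer;

// `new` is a `const fn`, so the buffer can be placed in a static.
static mut _SAMPLES: HistoryBuffer<u32, 8> = HistoryBuffer::new();

// A deliberately non-`Copy` element type.
#[derive(Debug, PartialEq)]
struct Reading(u32);

fn main() {
    let mut buf: HistoryBuffer<Reading, 2> = HistoryBuffer::new();
    assert_eq!(buf.recent(), None); // starts empty, not zero-filled

    buf.write(Reading(1));
    buf.write(Reading(2));
    buf.write(Reading(3)); // overwrites -- and drops -- Reading(1)

    assert_eq!(buf.recent(), Some(&Reading(3)));
    assert_eq!(buf.len(), 2);
}
```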

View File

@ -1,40 +0,0 @@
//! Unfortunate implementation detail required to construct `heapless` types in const context
use core::{marker::PhantomData, mem::MaybeUninit};
#[cfg(has_atomics)]
use crate::spsc::{Atomic, MultiCore};
/// `const-fn` version of [`BinaryHeap`](../binary_heap/struct.BinaryHeap.html)
pub struct BinaryHeap<A, K> {
pub(crate) _kind: PhantomData<K>,
pub(crate) data: Vec<A>,
}
/// `const-fn` version of [`LinearMap`](../struct.LinearMap.html)
pub struct LinearMap<A> {
pub(crate) buffer: Vec<A>,
}
/// `const-fn` version of [`spsc::Queue`](../spsc/struct.Queue.html)
#[cfg(has_atomics)]
pub struct Queue<A, U = usize, C = MultiCore> {
// this is from where we dequeue items
pub(crate) head: Atomic<U, C>,
// this is where we enqueue new items
pub(crate) tail: Atomic<U, C>,
pub(crate) buffer: MaybeUninit<A>,
}
/// `const-fn` version of [`String`](../struct.String.html)
pub struct String<A> {
pub(crate) vec: Vec<A>,
}
/// `const-fn` version of [`Vec`](../struct.Vec.html)
pub struct Vec<A> {
pub(crate) buffer: MaybeUninit<A>,
pub(crate) len: usize,
}

View File

@ -1,13 +1,5 @@
use core::{
borrow::Borrow,
fmt,
iter::FromIterator,
mem::{self, MaybeUninit},
num::NonZeroU32,
ops, slice,
};
use core::{borrow::Borrow, fmt, iter::FromIterator, mem, num::NonZeroU32, ops, slice};
use generic_array::{typenum::PowerOfTwo, ArrayLength, GenericArray};
use hash32::{BuildHasher, BuildHasherDefault, FnvHasher, Hash, Hasher};
use crate::Vec;
@ -20,10 +12,9 @@ use crate::Vec;
/// # Examples
/// ```
/// use heapless::FnvIndexMap;
/// use heapless::consts::*;
///
/// // A hash map with a capacity of 16 key-value pairs allocated on the stack
/// let mut book_reviews = FnvIndexMap::<_, _, U16>::new();
/// let mut book_reviews = FnvIndexMap::<_, _, 16>::new();
///
/// // review some books.
/// book_reviews.insert("Adventures of Huckleberry Finn", "My favorite book.").unwrap();
@ -54,7 +45,7 @@ use crate::Vec;
/// println!("{}: \"{}\"", book, review);
/// }
/// ```
pub type FnvIndexMap<K, V, N> = IndexMap<K, V, N, BuildHasherDefault<FnvHasher>>;
pub type FnvIndexMap<K, V, const N: usize> = IndexMap<K, V, BuildHasherDefault<FnvHasher>, N>;
#[derive(Clone, Copy, Eq, PartialEq)]
struct HashValue(u16);
@ -126,30 +117,28 @@ macro_rules! probe_loop {
}
}
struct CoreMap<K, V, N>
where
K: Eq + Hash,
N: ArrayLength<Bucket<K, V>> + ArrayLength<Option<Pos>>,
{
struct CoreMap<K, V, const N: usize> {
entries: Vec<Bucket<K, V>, N>,
indices: GenericArray<Option<Pos>, N>,
indices: [Option<Pos>; N],
}
impl<K, V, N> CoreMap<K, V, N>
where
K: Eq + Hash,
N: ArrayLength<Bucket<K, V>> + ArrayLength<Option<Pos>>,
{
// TODO turn into a `const fn`; needs `mem::zeroed` to be a `const fn`
fn new() -> Self {
impl<K, V, const N: usize> CoreMap<K, V, N> {
const fn new() -> Self {
const INIT: Option<Pos> = None;
CoreMap {
entries: Vec::new(),
indices: unsafe { MaybeUninit::zeroed().assume_init() },
indices: [INIT; N],
}
}
}
impl<K, V, const N: usize> CoreMap<K, V, N>
where
K: Eq + Hash,
{
fn capacity() -> usize {
N::to_usize()
N
}
fn mask() -> usize {
@ -311,11 +300,10 @@ where
}
}
impl<K, V, N> Clone for CoreMap<K, V, N>
impl<K, V, const N: usize> Clone for CoreMap<K, V, N>
where
K: Eq + Hash + Clone,
V: Clone,
N: ArrayLength<Bucket<K, V>> + ArrayLength<Option<Pos>>,
{
fn clone(&self) -> Self {
Self {
@ -339,10 +327,9 @@ where
///
/// ```
/// use heapless::FnvIndexMap;
/// use heapless::consts::*;
///
/// // A hash map with a capacity of 16 key-value pairs allocated on the stack
/// let mut book_reviews = FnvIndexMap::<_, _, U16>::new();
/// let mut book_reviews = FnvIndexMap::<_, _, 16>::new();
///
/// // review some books.
/// book_reviews.insert("Adventures of Huckleberry Finn", "My favorite book.").unwrap();
@ -373,52 +360,38 @@ where
/// println!("{}: \"{}\"", book, review);
/// }
/// ```
pub struct IndexMap<K, V, N, S>
where
K: Eq + Hash,
N: ArrayLength<Bucket<K, V>> + ArrayLength<Option<Pos>>,
{
pub struct IndexMap<K, V, S, const N: usize> {
core: CoreMap<K, V, N>,
build_hasher: S,
}
impl<K, V, N, S> IndexMap<K, V, N, BuildHasherDefault<S>>
where
K: Eq + Hash,
S: Default + Hasher,
N: ArrayLength<Bucket<K, V>> + ArrayLength<Option<Pos>> + PowerOfTwo,
{
// TODO turn into a `const fn`; needs `mem::zeroed` to be a `const fn`
impl<K, V, S, const N: usize> IndexMap<K, V, BuildHasherDefault<S>, N> {
/// Creates an empty `IndexMap`.
///
/// **NOTE** This constructor will become a `const fn` in the future
pub fn new() -> Self {
pub const fn new() -> Self {
IndexMap {
build_hasher: BuildHasherDefault::default(),
build_hasher: BuildHasherDefault::new(),
core: CoreMap::new(),
}
}
}
impl<K, V, N, S> IndexMap<K, V, N, S>
impl<K, V, S, const N: usize> IndexMap<K, V, S, N>
where
K: Eq + Hash,
S: BuildHasher,
N: ArrayLength<Bucket<K, V>> + ArrayLength<Option<Pos>>,
{
/* Public API */
/// Returns the number of elements the map can hold
pub fn capacity(&self) -> usize {
N::to_usize()
N
}
/// Return an iterator over the keys of the map, in their order
///
/// ```
/// use heapless::FnvIndexMap;
/// use heapless::consts::*;
///
/// let mut map = FnvIndexMap::<_, _, U16>::new();
/// let mut map = FnvIndexMap::<_, _, 16>::new();
/// map.insert("a", 1).unwrap();
/// map.insert("b", 2).unwrap();
/// map.insert("c", 3).unwrap();
@ -435,9 +408,8 @@ where
///
/// ```
/// use heapless::FnvIndexMap;
/// use heapless::consts::*;
///
/// let mut map = FnvIndexMap::<_, _, U16>::new();
/// let mut map = FnvIndexMap::<_, _, 16>::new();
/// map.insert("a", 1).unwrap();
/// map.insert("b", 2).unwrap();
/// map.insert("c", 3).unwrap();
@ -454,9 +426,8 @@ where
///
/// ```
/// use heapless::FnvIndexMap;
/// use heapless::consts::*;
///
/// let mut map = FnvIndexMap::<_, _, U16>::new();
/// let mut map = FnvIndexMap::<_, _, 16>::new();
/// map.insert("a", 1).unwrap();
/// map.insert("b", 2).unwrap();
/// map.insert("c", 3).unwrap();
@ -477,9 +448,8 @@ where
///
/// ```
/// use heapless::FnvIndexMap;
/// use heapless::consts::*;
///
/// let mut map = FnvIndexMap::<_, _, U16>::new();
/// let mut map = FnvIndexMap::<_, _, 16>::new();
/// map.insert("a", 1).unwrap();
/// map.insert("b", 2).unwrap();
/// map.insert("c", 3).unwrap();
@ -498,9 +468,8 @@ where
///
/// ```
/// use heapless::FnvIndexMap;
/// use heapless::consts::*;
///
/// let mut map = FnvIndexMap::<_, _, U16>::new();
/// let mut map = FnvIndexMap::<_, _, 16>::new();
/// map.insert("a", 1).unwrap();
/// map.insert("b", 2).unwrap();
/// map.insert("c", 3).unwrap();
@ -528,9 +497,8 @@ where
///
/// ```
/// use heapless::FnvIndexMap;
/// use heapless::consts::*;
///
/// let mut a = FnvIndexMap::<_, _, U16>::new();
/// let mut a = FnvIndexMap::<_, _, 16>::new();
/// assert_eq!(a.len(), 0);
/// a.insert(1, "a").unwrap();
/// assert_eq!(a.len(), 1);
@ -545,9 +513,8 @@ where
///
/// ```
/// use heapless::FnvIndexMap;
/// use heapless::consts::*;
///
/// let mut a = FnvIndexMap::<_, _, U16>::new();
/// let mut a = FnvIndexMap::<_, _, 16>::new();
/// assert!(a.is_empty());
/// a.insert(1, "a");
/// assert!(!a.is_empty());
@ -562,9 +529,8 @@ where
///
/// ```
/// use heapless::FnvIndexMap;
/// use heapless::consts::*;
///
/// let mut a = FnvIndexMap::<_, _, U16>::new();
/// let mut a = FnvIndexMap::<_, _, 16>::new();
/// a.insert(1, "a");
/// a.clear();
/// assert!(a.is_empty());
@ -585,9 +551,8 @@ where
///
/// ```
/// use heapless::FnvIndexMap;
/// use heapless::consts::*;
///
/// let mut map = FnvIndexMap::<_, _, U16>::new();
/// let mut map = FnvIndexMap::<_, _, 16>::new();
/// map.insert(1, "a").unwrap();
/// assert_eq!(map.get(&1), Some(&"a"));
/// assert_eq!(map.get(&2), None);
@ -612,9 +577,8 @@ where
///
/// ```
/// use heapless::FnvIndexMap;
/// use heapless::consts::*;
///
/// let mut map = FnvIndexMap::<_, _, U8>::new();
/// let mut map = FnvIndexMap::<_, _, 8>::new();
/// map.insert(1, "a").unwrap();
/// assert_eq!(map.contains_key(&1), true);
/// assert_eq!(map.contains_key(&2), false);
@ -638,9 +602,8 @@ where
///
/// ```
/// use heapless::FnvIndexMap;
/// use heapless::consts::*;
///
/// let mut map = FnvIndexMap::<_, _, U8>::new();
/// let mut map = FnvIndexMap::<_, _, 8>::new();
/// map.insert(1, "a").unwrap();
/// if let Some(x) = map.get_mut(&1) {
/// *x = "b";
@ -677,9 +640,8 @@ where
///
/// ```
/// use heapless::FnvIndexMap;
/// use heapless::consts::*;
///
/// let mut map = FnvIndexMap::<_, _, U8>::new();
/// let mut map = FnvIndexMap::<_, _, 8>::new();
/// assert_eq!(map.insert(37, "a"), Ok(None));
/// assert_eq!(map.is_empty(), false);
///
@ -710,9 +672,8 @@ where
///
/// ```
/// use heapless::FnvIndexMap;
/// use heapless::consts::*;
///
/// let mut map = FnvIndexMap::<_, _, U8>::new();
/// let mut map = FnvIndexMap::<_, _, 8>::new();
/// map.insert(1, "a").unwrap();
/// assert_eq!(map.remove(&1), Some("a"));
/// assert_eq!(map.remove(&1), None);
@ -762,12 +723,11 @@ where
}
}
impl<'a, K, Q, V, N, S> ops::Index<&'a Q> for IndexMap<K, V, N, S>
impl<'a, K, Q, V, S, const N: usize> ops::Index<&'a Q> for IndexMap<K, V, S, N>
where
K: Eq + Hash + Borrow<Q>,
Q: ?Sized + Eq + Hash,
S: BuildHasher,
N: ArrayLength<Bucket<K, V>> + ArrayLength<Option<Pos>>,
{
type Output = V;
@ -776,24 +736,22 @@ where
}
}
impl<'a, K, Q, V, N, S> ops::IndexMut<&'a Q> for IndexMap<K, V, N, S>
impl<'a, K, Q, V, S, const N: usize> ops::IndexMut<&'a Q> for IndexMap<K, V, S, N>
where
K: Eq + Hash + Borrow<Q>,
Q: ?Sized + Eq + Hash,
S: BuildHasher,
N: ArrayLength<Bucket<K, V>> + ArrayLength<Option<Pos>>,
{
fn index_mut(&mut self, key: &Q) -> &mut V {
self.get_mut(key).expect("key not found")
}
}
impl<K, V, N, S> Clone for IndexMap<K, V, N, S>
impl<K, V, S, const N: usize> Clone for IndexMap<K, V, S, N>
where
K: Eq + Hash + Clone,
V: Clone,
S: Clone,
N: ArrayLength<Bucket<K, V>> + ArrayLength<Option<Pos>>,
{
fn clone(&self) -> Self {
Self {
@ -803,23 +761,21 @@ where
}
}
impl<K, V, N, S> fmt::Debug for IndexMap<K, V, N, S>
impl<K, V, S, const N: usize> fmt::Debug for IndexMap<K, V, S, N>
where
K: Eq + Hash + fmt::Debug,
V: fmt::Debug,
S: BuildHasher,
N: ArrayLength<Bucket<K, V>> + ArrayLength<Option<Pos>>,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_map().entries(self.iter()).finish()
}
}
impl<K, V, N, S> Default for IndexMap<K, V, N, S>
impl<K, V, S, const N: usize> Default for IndexMap<K, V, S, N>
where
K: Eq + Hash,
S: BuildHasher + Default,
N: ArrayLength<Bucket<K, V>> + ArrayLength<Option<Pos>>,
{
fn default() -> Self {
IndexMap {
@ -829,16 +785,15 @@ where
}
}
impl<K, V, N, S, N2, S2> PartialEq<IndexMap<K, V, N2, S2>> for IndexMap<K, V, N, S>
impl<K, V, S, S2, const N: usize, const N2: usize> PartialEq<IndexMap<K, V, S2, N2>>
for IndexMap<K, V, S, N>
where
K: Eq + Hash,
V: Eq,
S: BuildHasher,
N: ArrayLength<Bucket<K, V>> + ArrayLength<Option<Pos>>,
S2: BuildHasher,
N2: ArrayLength<Bucket<K, V>> + ArrayLength<Option<Pos>>,
{
fn eq(&self, other: &IndexMap<K, V, N2, S2>) -> bool {
fn eq(&self, other: &IndexMap<K, V, S2, N2>) -> bool {
self.len() == other.len()
&& self
.iter()
@ -846,20 +801,18 @@ where
}
}
impl<K, V, N, S> Eq for IndexMap<K, V, N, S>
impl<K, V, S, const N: usize> Eq for IndexMap<K, V, S, N>
where
K: Eq + Hash,
V: Eq,
S: BuildHasher,
N: ArrayLength<Bucket<K, V>> + ArrayLength<Option<Pos>>,
{
}
impl<K, V, N, S> Extend<(K, V)> for IndexMap<K, V, N, S>
impl<K, V, S, const N: usize> Extend<(K, V)> for IndexMap<K, V, S, N>
where
K: Eq + Hash,
S: BuildHasher,
N: ArrayLength<Bucket<K, V>> + ArrayLength<Option<Pos>>,
{
fn extend<I>(&mut self, iterable: I)
where
@ -871,12 +824,11 @@ where
}
}
impl<'a, K, V, N, S> Extend<(&'a K, &'a V)> for IndexMap<K, V, N, S>
impl<'a, K, V, S, const N: usize> Extend<(&'a K, &'a V)> for IndexMap<K, V, S, N>
where
K: Eq + Hash + Copy,
V: Copy,
S: BuildHasher,
N: ArrayLength<Bucket<K, V>> + ArrayLength<Option<Pos>>,
{
fn extend<I>(&mut self, iterable: I)
where
@ -886,11 +838,10 @@ where
}
}
impl<K, V, N, S> FromIterator<(K, V)> for IndexMap<K, V, N, S>
impl<K, V, S, const N: usize> FromIterator<(K, V)> for IndexMap<K, V, S, N>
where
K: Eq + Hash,
S: BuildHasher + Default,
N: ArrayLength<Bucket<K, V>> + ArrayLength<Option<Pos>>,
{
fn from_iter<I>(iterable: I) -> Self
where
@ -902,11 +853,10 @@ where
}
}
impl<'a, K, V, N, S> IntoIterator for &'a IndexMap<K, V, N, S>
impl<'a, K, V, S, const N: usize> IntoIterator for &'a IndexMap<K, V, S, N>
where
K: Eq + Hash,
S: BuildHasher,
N: ArrayLength<Bucket<K, V>> + ArrayLength<Option<Pos>>,
{
type Item = (&'a K, &'a V);
type IntoIter = Iter<'a, K, V>;
@ -916,11 +866,10 @@ where
}
}
impl<'a, K, V, N, S> IntoIterator for &'a mut IndexMap<K, V, N, S>
impl<'a, K, V, S, const N: usize> IntoIterator for &'a mut IndexMap<K, V, S, N>
where
K: Eq + Hash,
S: BuildHasher,
N: ArrayLength<Bucket<K, V>> + ArrayLength<Option<Pos>>,
{
type Item = (&'a K, &'a mut V);
type IntoIter = IterMut<'a, K, V>;
@ -976,21 +925,16 @@ where
#[cfg(test)]
mod tests {
use crate::FnvIndexMap;
use core::mem;
use generic_array::typenum::Unsigned;
use crate::{consts::*, FnvIndexMap};
#[test]
fn size() {
type Cap = U4;
let cap = Cap::to_usize();
const CAP: usize = 4;
assert_eq!(
mem::size_of::<FnvIndexMap<i16, u16, Cap>>(),
cap * mem::size_of::<u32>() + // indices
cap * (mem::size_of::<i16>() + // key
mem::size_of::<FnvIndexMap<i16, u16, CAP>>(),
CAP * mem::size_of::<u32>() + // indices
CAP * (mem::size_of::<i16>() + // key
mem::size_of::<u16>() + // value
mem::size_of::<u16>() // hash
) + // buckets
@ -1001,10 +945,10 @@ mod tests {
#[test]
fn partial_eq() {
{
let mut a: FnvIndexMap<_, _, U4> = FnvIndexMap::new();
let mut a: FnvIndexMap<_, _, 4> = FnvIndexMap::new();
a.insert("k1", "v1").unwrap();
let mut b: FnvIndexMap<_, _, U4> = FnvIndexMap::new();
let mut b: FnvIndexMap<_, _, 4> = FnvIndexMap::new();
b.insert("k1", "v1").unwrap();
assert!(a == b);
@ -1015,11 +959,11 @@ mod tests {
}
{
let mut a: FnvIndexMap<_, _, U4> = FnvIndexMap::new();
let mut a: FnvIndexMap<_, _, 4> = FnvIndexMap::new();
a.insert("k1", "v1").unwrap();
a.insert("k2", "v2").unwrap();
let mut b: FnvIndexMap<_, _, U4> = FnvIndexMap::new();
let mut b: FnvIndexMap<_, _, 4> = FnvIndexMap::new();
b.insert("k2", "v2").unwrap();
b.insert("k1", "v1").unwrap();

View File

@ -1,10 +1,7 @@
use crate::indexmap::{self, IndexMap};
use core::{borrow::Borrow, fmt, iter::FromIterator};
use generic_array::{typenum::PowerOfTwo, ArrayLength};
use hash32::{BuildHasher, BuildHasherDefault, FnvHasher, Hash, Hasher};
use crate::indexmap::{self, Bucket, IndexMap, Pos};
/// A [`heapless::IndexSet`](./struct.IndexSet.html) using the
/// default FNV hasher.
/// A list of all Methods and Traits available for `FnvIndexSet` can be found in
@ -13,10 +10,9 @@ use crate::indexmap::{self, Bucket, IndexMap, Pos};
/// # Examples
/// ```
/// use heapless::FnvIndexSet;
/// use heapless::consts::*;
///
/// // A hash set with a capacity of 16 elements allocated on the stack
/// let mut books = FnvIndexSet::<_, U16>::new();
/// let mut books = FnvIndexSet::<_, 16>::new();
///
/// // Add some books.
/// books.insert("A Dance With Dragons").unwrap();
@ -38,7 +34,7 @@ use crate::indexmap::{self, Bucket, IndexMap, Pos};
/// println!("{}", book);
/// }
/// ```
pub type FnvIndexSet<T, N> = IndexSet<T, N, BuildHasherDefault<FnvHasher>>;
pub type FnvIndexSet<T, const N: usize> = IndexSet<T, BuildHasherDefault<FnvHasher>, N>;
/// Fixed capacity [`IndexSet`](https://docs.rs/indexmap/1/indexmap/set/struct.IndexSet.html).
///
@ -54,10 +50,9 @@ pub type FnvIndexSet<T, N> = IndexSet<T, N, BuildHasherDefault<FnvHasher>>;
///
/// ```
/// use heapless::FnvIndexSet;
/// use heapless::consts::*;
///
/// // A hash set with a capacity of 16 elements allocated on the stack
/// let mut books = FnvIndexSet::<_, U16>::new();
/// let mut books = FnvIndexSet::<_, 16>::new();
///
/// // Add some books.
/// books.insert("A Dance With Dragons").unwrap();
@ -79,33 +74,32 @@ pub type FnvIndexSet<T, N> = IndexSet<T, N, BuildHasherDefault<FnvHasher>>;
/// println!("{}", book);
/// }
/// ```
pub struct IndexSet<T, N, S>
pub struct IndexSet<T, S, const N: usize>
where
T: Eq + Hash,
N: ArrayLength<Bucket<T, ()>> + ArrayLength<Option<Pos>>,
{
map: IndexMap<T, (), N, S>,
map: IndexMap<T, (), S, N>,
}
impl<T, N, S> IndexSet<T, N, BuildHasherDefault<S>>
impl<T, S, const N: usize> IndexSet<T, BuildHasherDefault<S>, N>
where
T: Eq + Hash,
S: Default + Hasher,
N: ArrayLength<Bucket<T, ()>> + ArrayLength<Option<Pos>> + PowerOfTwo,
{
/// Creates an empty `IndexSet`
pub fn new() -> Self {
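// The type-level `PowerOfTwo` bound is gone; enforce the invariant at run time instead.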
assert!(N.is_power_of_two());
IndexSet {
map: IndexMap::new(),
}
}
}
impl<T, N, S> IndexSet<T, N, S>
impl<T, S, const N: usize> IndexSet<T, S, N>
where
T: Eq + Hash,
S: BuildHasher,
N: ArrayLength<Bucket<T, ()>> + ArrayLength<Option<Pos>>,
{
/// Returns the number of elements the set can hold
///
@ -113,9 +107,8 @@ where
///
/// ```
/// use heapless::FnvIndexSet;
/// use heapless::consts::*;
///
/// let set = FnvIndexSet::<i32, U16>::new();
/// let set = FnvIndexSet::<i32, 16>::new();
/// assert_eq!(set.capacity(), 16);
/// ```
pub fn capacity(&self) -> usize {
@ -128,9 +121,8 @@ where
///
/// ```
/// use heapless::FnvIndexSet;
/// use heapless::consts::*;
///
/// let mut set = FnvIndexSet::<_, U16>::new();
/// let mut set = FnvIndexSet::<_, 16>::new();
/// set.insert("a").unwrap();
/// set.insert("b").unwrap();
///
@ -152,30 +144,28 @@ where
///
/// ```
/// use heapless::FnvIndexSet;
/// use heapless::consts::*;
///
/// let mut a: FnvIndexSet<_, U16> = [1, 2, 3].iter().cloned().collect();
/// let mut b: FnvIndexSet<_, U16> = [4, 2, 3, 4].iter().cloned().collect();
/// let mut a: FnvIndexSet<_, 16> = [1, 2, 3].iter().cloned().collect();
/// let mut b: FnvIndexSet<_, 16> = [4, 2, 3, 4].iter().cloned().collect();
///
/// // Can be seen as `a - b`.
/// for x in a.difference(&b) {
/// println!("{}", x); // Print 1
/// }
///
/// let diff: FnvIndexSet<_, U16> = a.difference(&b).collect();
/// assert_eq!(diff, [1].iter().collect::<FnvIndexSet<_, U16>>());
/// let diff: FnvIndexSet<_, 16> = a.difference(&b).collect();
/// assert_eq!(diff, [1].iter().collect::<FnvIndexSet<_, 16>>());
///
/// // Note that difference is not symmetric,
/// // and `b - a` means something else:
/// let diff: FnvIndexSet<_, U16> = b.difference(&a).collect();
/// assert_eq!(diff, [4].iter().collect::<FnvIndexSet<_, U16>>());
/// let diff: FnvIndexSet<_, 16> = b.difference(&a).collect();
/// assert_eq!(diff, [4].iter().collect::<FnvIndexSet<_, 16>>());
/// ```
pub fn difference<'a, N2, S2>(
pub fn difference<'a, S2, const N2: usize>(
&'a self,
other: &'a IndexSet<T, N2, S2>,
) -> Difference<'a, T, N2, S2>
other: &'a IndexSet<T, S2, N2>,
) -> Difference<'a, T, S2, N2>
where
N2: ArrayLength<Bucket<T, ()>> + ArrayLength<Option<Pos>>,
S2: BuildHasher,
{
Difference {
@ -191,28 +181,26 @@ where
///
/// ```
/// use heapless::FnvIndexSet;
/// use heapless::consts::*;
///
/// let mut a: FnvIndexSet<_, U16> = [1, 2, 3].iter().cloned().collect();
/// let mut b: FnvIndexSet<_, U16> = [4, 2, 3, 4].iter().cloned().collect();
/// let mut a: FnvIndexSet<_, 16> = [1, 2, 3].iter().cloned().collect();
/// let mut b: FnvIndexSet<_, 16> = [4, 2, 3, 4].iter().cloned().collect();
///
/// // Print 1, 4 in that order.
/// for x in a.symmetric_difference(&b) {
/// println!("{}", x);
/// }
///
/// let diff1: FnvIndexSet<_, U16> = a.symmetric_difference(&b).collect();
/// let diff2: FnvIndexSet<_, U16> = b.symmetric_difference(&a).collect();
/// let diff1: FnvIndexSet<_, 16> = a.symmetric_difference(&b).collect();
/// let diff2: FnvIndexSet<_, 16> = b.symmetric_difference(&a).collect();
///
/// assert_eq!(diff1, diff2);
/// assert_eq!(diff1, [1, 4].iter().collect::<FnvIndexSet<_, U16>>());
/// assert_eq!(diff1, [1, 4].iter().collect::<FnvIndexSet<_, 16>>());
/// ```
pub fn symmetric_difference<'a, N2, S2>(
pub fn symmetric_difference<'a, S2, const N2: usize>(
&'a self,
other: &'a IndexSet<T, N2, S2>,
other: &'a IndexSet<T, S2, N2>,
) -> impl Iterator<Item = &'a T>
where
N2: ArrayLength<Bucket<T, ()>> + ArrayLength<Option<Pos>>,
S2: BuildHasher,
{
self.difference(other).chain(other.difference(self))
@ -225,25 +213,23 @@ where
///
/// ```
/// use heapless::FnvIndexSet;
/// use heapless::consts::*;
///
/// let mut a: FnvIndexSet<_, U16> = [1, 2, 3].iter().cloned().collect();
/// let mut b: FnvIndexSet<_, U16> = [4, 2, 3, 4].iter().cloned().collect();
/// let mut a: FnvIndexSet<_, 16> = [1, 2, 3].iter().cloned().collect();
/// let mut b: FnvIndexSet<_, 16> = [4, 2, 3, 4].iter().cloned().collect();
///
/// // Print 2, 3 in that order.
/// for x in a.intersection(&b) {
/// println!("{}", x);
/// }
///
/// let intersection: FnvIndexSet<_, U16> = a.intersection(&b).collect();
/// assert_eq!(intersection, [2, 3].iter().collect::<FnvIndexSet<_, U16>>());
/// let intersection: FnvIndexSet<_, 16> = a.intersection(&b).collect();
/// assert_eq!(intersection, [2, 3].iter().collect::<FnvIndexSet<_, 16>>());
/// ```
pub fn intersection<'a, N2, S2>(
pub fn intersection<'a, S2, const N2: usize>(
&'a self,
other: &'a IndexSet<T, N2, S2>,
) -> Intersection<'a, T, N2, S2>
other: &'a IndexSet<T, S2, N2>,
) -> Intersection<'a, T, S2, N2>
where
N2: ArrayLength<Bucket<T, ()>> + ArrayLength<Option<Pos>>,
S2: BuildHasher,
{
Intersection {
@ -259,25 +245,23 @@ where
///
/// ```
/// use heapless::FnvIndexSet;
/// use heapless::consts::*;
///
/// let mut a: FnvIndexSet<_, U16> = [1, 2, 3].iter().cloned().collect();
/// let mut b: FnvIndexSet<_, U16> = [4, 2, 3, 4].iter().cloned().collect();
/// let mut a: FnvIndexSet<_, 16> = [1, 2, 3].iter().cloned().collect();
/// let mut b: FnvIndexSet<_, 16> = [4, 2, 3, 4].iter().cloned().collect();
///
/// // Print 1, 2, 3, 4 in that order.
/// for x in a.union(&b) {
/// println!("{}", x);
/// }
///
/// let union: FnvIndexSet<_, U16> = a.union(&b).collect();
/// assert_eq!(union, [1, 2, 3, 4].iter().collect::<FnvIndexSet<_, U16>>());
/// let union: FnvIndexSet<_, 16> = a.union(&b).collect();
/// assert_eq!(union, [1, 2, 3, 4].iter().collect::<FnvIndexSet<_, 16>>());
/// ```
pub fn union<'a, N2, S2>(
pub fn union<'a, S2, const N2: usize>(
&'a self,
other: &'a IndexSet<T, N2, S2>,
other: &'a IndexSet<T, S2, N2>,
) -> impl Iterator<Item = &'a T>
where
N2: ArrayLength<Bucket<T, ()>> + ArrayLength<Option<Pos>>,
S2: BuildHasher,
{
self.iter().chain(other.difference(self))
@ -289,9 +273,8 @@ where
///
/// ```
/// use heapless::FnvIndexSet;
/// use heapless::consts::*;
///
/// let mut v: FnvIndexSet<_, U16> = FnvIndexSet::new();
/// let mut v: FnvIndexSet<_, 16> = FnvIndexSet::new();
/// assert_eq!(v.len(), 0);
/// v.insert(1).unwrap();
/// assert_eq!(v.len(), 1);
@ -306,9 +289,8 @@ where
///
/// ```
/// use heapless::FnvIndexSet;
/// use heapless::consts::*;
///
/// let mut v: FnvIndexSet<_, U16> = FnvIndexSet::new();
/// let mut v: FnvIndexSet<_, 16> = FnvIndexSet::new();
/// assert!(v.is_empty());
/// v.insert(1).unwrap();
/// assert!(!v.is_empty());
@ -323,9 +305,8 @@ where
///
/// ```
/// use heapless::FnvIndexSet;
/// use heapless::consts::*;
///
/// let mut v: FnvIndexSet<_, U16> = FnvIndexSet::new();
/// let mut v: FnvIndexSet<_, 16> = FnvIndexSet::new();
/// v.insert(1).unwrap();
/// v.clear();
/// assert!(v.is_empty());
@ -343,9 +324,8 @@ where
///
/// ```
/// use heapless::FnvIndexSet;
/// use heapless::consts::*;
///
/// let set: FnvIndexSet<_, U16> = [1, 2, 3].iter().cloned().collect();
/// let set: FnvIndexSet<_, 16> = [1, 2, 3].iter().cloned().collect();
/// assert_eq!(set.contains(&1), true);
/// assert_eq!(set.contains(&4), false);
/// ```
@ -364,10 +344,9 @@ where
///
/// ```
/// use heapless::FnvIndexSet;
/// use heapless::consts::*;
///
/// let a: FnvIndexSet<_, U16> = [1, 2, 3].iter().cloned().collect();
/// let mut b = FnvIndexSet::<_, U16>::new();
/// let a: FnvIndexSet<_, 16> = [1, 2, 3].iter().cloned().collect();
/// let mut b = FnvIndexSet::<_, 16>::new();
///
/// assert_eq!(a.is_disjoint(&b), true);
/// b.insert(4).unwrap();
@ -375,9 +354,8 @@ where
/// b.insert(1).unwrap();
/// assert_eq!(a.is_disjoint(&b), false);
/// ```
pub fn is_disjoint<N2, S2>(&self, other: &IndexSet<T, N2, S2>) -> bool
pub fn is_disjoint<S2, const N2: usize>(&self, other: &IndexSet<T, S2, N2>) -> bool
where
N2: ArrayLength<Bucket<T, ()>> + ArrayLength<Option<Pos>>,
S2: BuildHasher,
{
self.iter().all(|v| !other.contains(v))
@ -390,10 +368,9 @@ where
///
/// ```
/// use heapless::FnvIndexSet;
/// use heapless::consts::*;
///
/// let sup: FnvIndexSet<_, U16> = [1, 2, 3].iter().cloned().collect();
/// let mut set = FnvIndexSet::<_, U16>::new();
/// let sup: FnvIndexSet<_, 16> = [1, 2, 3].iter().cloned().collect();
/// let mut set = FnvIndexSet::<_, 16>::new();
///
/// assert_eq!(set.is_subset(&sup), true);
/// set.insert(2).unwrap();
@ -401,9 +378,8 @@ where
/// set.insert(4).unwrap();
/// assert_eq!(set.is_subset(&sup), false);
/// ```
pub fn is_subset<N2, S2>(&self, other: &IndexSet<T, N2, S2>) -> bool
pub fn is_subset<S2, const N2: usize>(&self, other: &IndexSet<T, S2, N2>) -> bool
where
N2: ArrayLength<Bucket<T, ()>> + ArrayLength<Option<Pos>>,
S2: BuildHasher,
{
self.iter().all(|v| other.contains(v))
@ -416,10 +392,9 @@ where
///
/// ```
/// use heapless::FnvIndexSet;
/// use heapless::consts::*;
///
/// let sub: FnvIndexSet<_, U16> = [1, 2].iter().cloned().collect();
/// let mut set = FnvIndexSet::<_, U16>::new();
/// let sub: FnvIndexSet<_, 16> = [1, 2].iter().cloned().collect();
/// let mut set = FnvIndexSet::<_, 16>::new();
///
/// assert_eq!(set.is_superset(&sub), false);
///
@ -430,9 +405,8 @@ where
/// set.insert(2).unwrap();
/// assert_eq!(set.is_superset(&sub), true);
/// ```
pub fn is_superset<N2, S2>(&self, other: &IndexSet<T, N2, S2>) -> bool
pub fn is_superset<S2, const N2: usize>(&self, other: &IndexSet<T, S2, N2>) -> bool
where
N2: ArrayLength<Bucket<T, ()>> + ArrayLength<Option<Pos>>,
S2: BuildHasher,
{
other.is_subset(self)
@ -448,9 +422,8 @@ where
///
/// ```
/// use heapless::FnvIndexSet;
/// use heapless::consts::*;
///
/// let mut set = FnvIndexSet::<_, U16>::new();
/// let mut set = FnvIndexSet::<_, 16>::new();
///
/// assert_eq!(set.insert(2).unwrap(), true);
/// assert_eq!(set.insert(2).unwrap(), false);
@ -472,9 +445,8 @@ where
///
/// ```
/// use heapless::FnvIndexSet;
/// use heapless::consts::*;
///
/// let mut set = FnvIndexSet::<_, U16>::new();
/// let mut set = FnvIndexSet::<_, 16>::new();
///
/// set.insert(2).unwrap();
/// assert_eq!(set.remove(&2), true);
@ -489,11 +461,10 @@ where
}
}
impl<T, N, S> Clone for IndexSet<T, N, S>
impl<T, S, const N: usize> Clone for IndexSet<T, S, N>
where
T: Eq + Hash + Clone,
S: Clone,
N: ArrayLength<Bucket<T, ()>> + ArrayLength<Option<Pos>>,
{
fn clone(&self) -> Self {
Self {
@ -502,22 +473,20 @@ where
}
}
impl<T, N, S> fmt::Debug for IndexSet<T, N, S>
impl<T, S, const N: usize> fmt::Debug for IndexSet<T, S, N>
where
T: Eq + Hash + fmt::Debug,
S: BuildHasher,
N: ArrayLength<Bucket<T, ()>> + ArrayLength<Option<Pos>>,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_set().entries(self.iter()).finish()
}
}
impl<T, N, S> Default for IndexSet<T, N, S>
impl<T, S, const N: usize> Default for IndexSet<T, S, N>
where
T: Eq + Hash,
S: BuildHasher + Default,
N: ArrayLength<Bucket<T, ()>> + ArrayLength<Option<Pos>>,
{
fn default() -> Self {
IndexSet {
@ -526,24 +495,22 @@ where
}
}
impl<T, N1, N2, S1, S2> PartialEq<IndexSet<T, N2, S2>> for IndexSet<T, N1, S1>
impl<T, S1, S2, const N1: usize, const N2: usize> PartialEq<IndexSet<T, S2, N2>>
for IndexSet<T, S1, N1>
where
T: Eq + Hash,
S1: BuildHasher,
S2: BuildHasher,
N1: ArrayLength<Bucket<T, ()>> + ArrayLength<Option<Pos>>,
N2: ArrayLength<Bucket<T, ()>> + ArrayLength<Option<Pos>>,
{
fn eq(&self, other: &IndexSet<T, N2, S2>) -> bool {
fn eq(&self, other: &IndexSet<T, S2, N2>) -> bool {
self.len() == other.len() && self.is_subset(other)
}
}
impl<T, N, S> Extend<T> for IndexSet<T, N, S>
impl<T, S, const N: usize> Extend<T> for IndexSet<T, S, N>
where
T: Eq + Hash,
S: BuildHasher,
N: ArrayLength<Bucket<T, ()>> + ArrayLength<Option<Pos>>,
{
fn extend<I>(&mut self, iterable: I)
where
@ -553,11 +520,10 @@ where
}
}
impl<'a, T, N, S> Extend<&'a T> for IndexSet<T, N, S>
impl<'a, T, S, const N: usize> Extend<&'a T> for IndexSet<T, S, N>
where
T: 'a + Eq + Hash + Copy,
S: BuildHasher,
N: ArrayLength<Bucket<T, ()>> + ArrayLength<Option<Pos>>,
{
fn extend<I>(&mut self, iterable: I)
where
@ -567,11 +533,10 @@ where
}
}
impl<T, N, S> FromIterator<T> for IndexSet<T, N, S>
impl<T, S, const N: usize> FromIterator<T> for IndexSet<T, S, N>
where
T: Eq + Hash,
S: BuildHasher + Default,
N: ArrayLength<Bucket<T, ()>> + ArrayLength<Option<Pos>>,
{
fn from_iter<I>(iter: I) -> Self
where
@ -583,11 +548,10 @@ where
}
}
impl<'a, T, N, S> IntoIterator for &'a IndexSet<T, N, S>
impl<'a, T, S, const N: usize> IntoIterator for &'a IndexSet<T, S, N>
where
T: Eq + Hash,
S: BuildHasher,
N: ArrayLength<Bucket<T, ()>> + ArrayLength<Option<Pos>>,
{
type Item = &'a T;
type IntoIter = Iter<'a, T>;
@ -617,21 +581,19 @@ impl<'a, T> Clone for Iter<'a, T> {
}
}
pub struct Difference<'a, T, N, S>
pub struct Difference<'a, T, S, const N: usize>
where
S: BuildHasher,
T: Eq + Hash,
N: ArrayLength<Bucket<T, ()>> + ArrayLength<Option<Pos>>,
{
iter: Iter<'a, T>,
other: &'a IndexSet<T, N, S>,
other: &'a IndexSet<T, S, N>,
}
impl<'a, T, N, S> Iterator for Difference<'a, T, N, S>
impl<'a, T, S, const N: usize> Iterator for Difference<'a, T, S, N>
where
S: BuildHasher,
T: Eq + Hash,
N: ArrayLength<Bucket<T, ()>> + ArrayLength<Option<Pos>>,
{
type Item = &'a T;
@ -645,21 +607,19 @@ where
}
}
pub struct Intersection<'a, T, N, S>
pub struct Intersection<'a, T, S, const N: usize>
where
S: BuildHasher,
T: Eq + Hash,
N: ArrayLength<Bucket<T, ()>> + ArrayLength<Option<Pos>>,
{
iter: Iter<'a, T>,
other: &'a IndexSet<T, N, S>,
other: &'a IndexSet<T, S, N>,
}
impl<'a, T, N, S> Iterator for Intersection<'a, T, N, S>
impl<'a, T, S, const N: usize> Iterator for Intersection<'a, T, S, N>
where
S: BuildHasher,
T: Eq + Hash,
N: ArrayLength<Bucket<T, ()>> + ArrayLength<Option<Pos>>,
{
type Item = &'a T;

View File

@ -10,17 +10,14 @@
//!
//! ```
//! use heapless::Vec; // fixed capacity `std::Vec`
//! use heapless::consts::U8; // type level integer used to specify capacity
//!
//! // on the stack
//! let mut xs: Vec<u8, U8> = Vec::new(); // can hold up to 8 elements
//! let mut xs: Vec<u8, 8> = Vec::new(); // can hold up to 8 elements
//! xs.push(42).unwrap();
//! assert_eq!(xs.pop(), Some(42));
//!
//! // in a `static` variable
//! // (because `const-fn` has not been fully stabilized you need to use the helper structs in
//! // the `i` module, which must be wrapped in a tuple struct)
//! static mut XS: Vec<u8, U8> = Vec(heapless::i::Vec::new());
//! static mut XS: Vec<u8, 8> = Vec::new();
//!
//! let xs = unsafe { &mut XS };
//!
@ -28,7 +25,7 @@
//! assert_eq!(xs.pop(), Some(42));
//!
//! // in the heap (though kind of pointless because no reallocation)
//! let mut ys: Box<Vec<u8, U8>> = Box::new(Vec::new());
//! let mut ys: Box<Vec<u8, 8>> = Box::new(Vec::new());
//! ys.push(42).unwrap();
//! assert_eq!(ys.pop(), Some(42));
//! ```
@ -66,7 +63,7 @@
//!
//! # Minimum Supported Rust Version (MSRV)
//!
//! This crate is guaranteed to compile on stable Rust 1.36 and up with its default set of features.
//! This crate is guaranteed to compile on stable Rust 1.51 and up with its default set of features.
//! It *might* compile on older versions but that may change in any new patch release.
#![cfg_attr(not(test), no_std)]
@ -76,8 +73,6 @@
#![deny(warnings)]
pub use binary_heap::BinaryHeap;
pub use generic_array::typenum::{consts, PowerOfTwo};
pub use generic_array::ArrayLength;
pub use histbuf::HistoryBuffer;
pub use indexmap::{Bucket, FnvIndexMap, IndexMap, Pos};
pub use indexset::{FnvIndexSet, IndexSet};
@ -99,7 +94,6 @@ mod de;
mod ser;
pub mod binary_heap;
pub mod i;
#[cfg(all(has_cas, feature = "cas"))]
pub mod mpmc;
#[cfg(all(has_cas, feature = "cas"))]

View File

@ -1,56 +1,37 @@
use core::{
borrow::Borrow,
fmt,
iter::FromIterator,
mem::{self, MaybeUninit},
ops, ptr, slice,
};
use generic_array::{ArrayLength, GenericArray};
use crate::Vec;
use core::{borrow::Borrow, fmt, iter::FromIterator, mem, ops, slice};
/// A fixed capacity map / dictionary that performs lookups via linear search
///
/// Note that, since this map doesn't use hashing, most operations are **O(N)** instead of O(1)
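///
/// A minimal lookup sketch (the capacity `8` below is arbitrary):
///
/// ```
/// use heapless::LinearMap;
///
/// let mut map: LinearMap<&str, u32, 8> = LinearMap::new();
/// map.insert("answer", 42).unwrap();
/// // `get` scans the backing buffer front to back, hence the O(N) bound
/// assert_eq!(map.get(&"answer"), Some(&42));
/// ```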
pub struct LinearMap<K, V, N>(#[doc(hidden)] pub crate::i::LinearMap<GenericArray<(K, V), N>>)
where
N: ArrayLength<(K, V)>,
K: Eq;
impl<A> crate::i::LinearMap<A> {
/// `LinearMap` `const` constructor; wrap the returned value in
/// [`LinearMap`](../struct.LinearMap.html)
pub const fn new() -> Self {
Self {
buffer: crate::i::Vec::new(),
}
}
pub struct LinearMap<K, V, const N: usize> {
pub(crate) buffer: Vec<(K, V), N>,
}
impl<K, V, N> LinearMap<K, V, N>
where
N: ArrayLength<(K, V)>,
K: Eq,
{
impl<K, V, const N: usize> LinearMap<K, V, N> {
/// Creates an empty `LinearMap`
///
/// # Examples
///
/// ```
/// use heapless::LinearMap;
/// use heapless::consts::*;
///
/// // allocate the map on the stack
/// let mut map: LinearMap<&str, isize, U8> = LinearMap::new();
/// let mut map: LinearMap<&str, isize, 8> = LinearMap::new();
///
/// // allocate the map in a static variable
/// static mut MAP: LinearMap<&str, isize, U8> = LinearMap(heapless::i::LinearMap::new());
/// static mut MAP: LinearMap<&str, isize, 8> = LinearMap::new();
/// ```
pub fn new() -> Self {
LinearMap(crate::i::LinearMap::new())
pub const fn new() -> Self {
Self { buffer: Vec::new() }
}
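// NOTE: `new` can be `const` here because `Vec::new` is a `const fn` in the
// const-generics port, which is what makes the `static` initializer in the
// doc example above possible.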
}
impl<K, V, const N: usize> LinearMap<K, V, N>
where
K: Eq,
{
/// Returns the number of elements that the map can hold
///
/// Computes in **O(1)** time
@ -59,13 +40,12 @@ where
///
/// ```
/// use heapless::LinearMap;
/// use heapless::consts::*;
///
/// let map: LinearMap<&str, isize, U8> = LinearMap::new();
/// let map: LinearMap<&str, isize, 8> = LinearMap::new();
/// assert_eq!(map.capacity(), 8);
/// ```
pub fn capacity(&self) -> usize {
N::to_usize()
N
}
/// Clears the map, removing all key-value pairs
@ -76,15 +56,14 @@ where
///
/// ```
/// use heapless::LinearMap;
/// use heapless::consts::*;
///
/// let mut map: LinearMap<_, _, U8> = LinearMap::new();
/// let mut map: LinearMap<_, _, 8> = LinearMap::new();
/// map.insert(1, "a").unwrap();
/// map.clear();
/// assert!(map.is_empty());
/// ```
pub fn clear(&mut self) {
self.0.buffer.clear()
self.buffer.clear()
}
/// Returns true if the map contains a value for the specified key.
@ -95,9 +74,8 @@ where
///
/// ```
/// use heapless::LinearMap;
/// use heapless::consts::*;
///
/// let mut map: LinearMap<_, _, U8> = LinearMap::new();
/// let mut map: LinearMap<_, _, 8> = LinearMap::new();
/// map.insert(1, "a").unwrap();
/// assert_eq!(map.contains_key(&1), true);
/// assert_eq!(map.contains_key(&2), false);
@ -114,9 +92,8 @@ where
///
/// ```
/// use heapless::LinearMap;
/// use heapless::consts::*;
///
/// let mut map: LinearMap<_, _, U8> = LinearMap::new();
/// let mut map: LinearMap<_, _, 8> = LinearMap::new();
/// map.insert(1, "a").unwrap();
/// assert_eq!(map.get(&1), Some(&"a"));
/// assert_eq!(map.get(&2), None);
@ -139,9 +116,8 @@ where
///
/// ```
/// use heapless::LinearMap;
/// use heapless::consts::*;
///
/// let mut map: LinearMap<_, _, U8> = LinearMap::new();
/// let mut map: LinearMap<_, _, 8> = LinearMap::new();
/// map.insert(1, "a").unwrap();
/// if let Some(x) = map.get_mut(&1) {
/// *x = "b";
@ -166,15 +142,14 @@ where
///
/// ```
/// use heapless::LinearMap;
/// use heapless::consts::*;
///
/// let mut a: LinearMap<_, _, U8> = LinearMap::new();
/// let mut a: LinearMap<_, _, 8> = LinearMap::new();
/// assert_eq!(a.len(), 0);
/// a.insert(1, "a").unwrap();
/// assert_eq!(a.len(), 1);
/// ```
pub fn len(&self) -> usize {
self.0.buffer.len
self.buffer.len()
}
/// Inserts a key-value pair into the map.
@ -189,9 +164,8 @@ where
///
/// ```
/// use heapless::LinearMap;
/// use heapless::consts::*;
///
/// let mut map: LinearMap<_, _, U8> = LinearMap::new();
/// let mut map: LinearMap<_, _, 8> = LinearMap::new();
/// assert_eq!(map.insert(37, "a").unwrap(), None);
/// assert_eq!(map.is_empty(), false);
///
@ -205,7 +179,7 @@ where
return Ok(Some(value));
}
self.0.buffer.push((key, value))?;
self.buffer.push((key, value))?;
Ok(None)
}
@ -217,9 +191,8 @@ where
///
/// ```
/// use heapless::LinearMap;
/// use heapless::consts::*;
///
/// let mut a: LinearMap<_, _, U8> = LinearMap::new();
/// let mut a: LinearMap<_, _, 8> = LinearMap::new();
/// assert!(a.is_empty());
/// a.insert(1, "a").unwrap();
/// assert!(!a.is_empty());
@ -234,9 +207,8 @@ where
///
/// ```
/// use heapless::LinearMap;
/// use heapless::consts::*;
///
/// let mut map: LinearMap<_, _, U8> = LinearMap::new();
/// let mut map: LinearMap<_, _, 8> = LinearMap::new();
/// map.insert("a", 1).unwrap();
/// map.insert("b", 2).unwrap();
/// map.insert("c", 3).unwrap();
@ -247,7 +219,7 @@ where
/// ```
pub fn iter(&self) -> Iter<'_, K, V> {
Iter {
iter: self.0.buffer.as_slice().iter(),
iter: self.buffer.as_slice().iter(),
}
}
@ -258,9 +230,8 @@ where
///
/// ```
/// use heapless::LinearMap;
/// use heapless::consts::*;
///
/// let mut map: LinearMap<_, _, U8> = LinearMap::new();
/// let mut map: LinearMap<_, _, 8> = LinearMap::new();
/// map.insert("a", 1).unwrap();
/// map.insert("b", 2).unwrap();
/// map.insert("c", 3).unwrap();
@ -276,7 +247,7 @@ where
/// ```
pub fn iter_mut(&mut self) -> IterMut<'_, K, V> {
IterMut {
iter: self.0.buffer.as_mut_slice().iter_mut(),
iter: self.buffer.as_mut_slice().iter_mut(),
}
}
@ -286,9 +257,8 @@ where
///
/// ```
/// use heapless::LinearMap;
/// use heapless::consts::*;
///
/// let mut map: LinearMap<_, _, U8> = LinearMap::new();
/// let mut map: LinearMap<_, _, 8> = LinearMap::new();
/// map.insert("a", 1).unwrap();
/// map.insert("b", 2).unwrap();
/// map.insert("c", 3).unwrap();
@ -310,9 +280,8 @@ where
///
/// ```
/// use heapless::LinearMap;
/// use heapless::consts::*;
///
/// let mut map: LinearMap<_, _, U8> = LinearMap::new();
/// let mut map: LinearMap<_, _, 8> = LinearMap::new();
/// map.insert(1, "a").unwrap();
/// assert_eq!(map.remove(&1), Some("a"));
/// assert_eq!(map.remove(&1), None);
@ -328,7 +297,7 @@ where
.find(|&(_, k)| k.borrow() == key)
.map(|(idx, _)| idx);
idx.map(|idx| self.0.buffer.swap_remove(idx).1)
idx.map(|idx| self.buffer.swap_remove(idx).1)
}
/// An iterator visiting all values in arbitrary order
@ -337,9 +306,8 @@ where
///
/// ```
/// use heapless::LinearMap;
/// use heapless::consts::*;
///
/// let mut map: LinearMap<_, _, U8> = LinearMap::new();
/// let mut map: LinearMap<_, _, 8> = LinearMap::new();
/// map.insert("a", 1).unwrap();
/// map.insert("b", 2).unwrap();
/// map.insert("c", 3).unwrap();
@ -358,9 +326,8 @@ where
///
/// ```
/// use heapless::LinearMap;
/// use heapless::consts::*;
///
/// let mut map: LinearMap<_, _, U8> = LinearMap::new();
/// let mut map: LinearMap<_, _, 8> = LinearMap::new();
/// map.insert("a", 1).unwrap();
/// map.insert("b", 2).unwrap();
/// map.insert("c", 3).unwrap();
@ -378,9 +345,8 @@ where
}
}
impl<'a, K, V, N, Q> ops::Index<&'a Q> for LinearMap<K, V, N>
impl<'a, K, V, Q, const N: usize> ops::Index<&'a Q> for LinearMap<K, V, N>
where
N: ArrayLength<(K, V)>,
K: Borrow<Q> + Eq,
Q: Eq + ?Sized,
{
@ -391,9 +357,8 @@ where
}
}
impl<'a, K, V, N, Q> ops::IndexMut<&'a Q> for LinearMap<K, V, N>
impl<'a, K, V, Q, const N: usize> ops::IndexMut<&'a Q> for LinearMap<K, V, N>
where
N: ArrayLength<(K, V)>,
K: Borrow<Q> + Eq,
Q: Eq + ?Sized,
{
@ -402,9 +367,8 @@ where
}
}
impl<K, V, N> Default for LinearMap<K, V, N>
impl<K, V, const N: usize> Default for LinearMap<K, V, N>
where
N: ArrayLength<(K, V)>,
K: Eq,
{
fn default() -> Self {
@ -412,22 +376,20 @@ where
}
}
impl<K, V, N> Clone for LinearMap<K, V, N>
impl<K, V, const N: usize> Clone for LinearMap<K, V, N>
where
N: ArrayLength<(K, V)>,
K: Eq + Clone,
V: Clone,
{
fn clone(&self) -> Self {
Self(crate::i::LinearMap {
buffer: self.0.buffer.clone(),
})
Self {
buffer: self.buffer.clone(),
}
}
}
impl<K, V, N> fmt::Debug for LinearMap<K, V, N>
impl<K, V, const N: usize> fmt::Debug for LinearMap<K, V, N>
where
N: ArrayLength<(K, V)>,
K: Eq + fmt::Debug,
V: fmt::Debug,
{
@ -436,9 +398,8 @@ where
}
}
impl<K, V, N> FromIterator<(K, V)> for LinearMap<K, V, N>
impl<K, V, const N: usize> FromIterator<(K, V)> for LinearMap<K, V, N>
where
N: ArrayLength<(K, V)>,
K: Eq,
{
fn from_iter<I>(iter: I) -> Self
@ -446,22 +407,20 @@ where
I: IntoIterator<Item = (K, V)>,
{
let mut out = Self::new();
out.0.buffer.extend(iter);
out.buffer.extend(iter);
out
}
}
pub struct IntoIter<K, V, N>
pub struct IntoIter<K, V, const N: usize>
where
N: ArrayLength<(K, V)>,
K: Eq,
{
inner: <Vec<(K, V), N> as IntoIterator>::IntoIter,
}
impl<K, V, N> Iterator for IntoIter<K, V, N>
impl<K, V, const N: usize> Iterator for IntoIter<K, V, N>
where
N: ArrayLength<(K, V)>,
K: Eq,
{
type Item = (K, V);
@ -470,28 +429,8 @@ where
}
}
impl<K, V, N> IntoIterator for LinearMap<K, V, N>
impl<'a, K, V, const N: usize> IntoIterator for &'a LinearMap<K, V, N>
where
N: ArrayLength<(K, V)>,
K: Eq,
{
type Item = (K, V);
type IntoIter = IntoIter<K, V, N>;
fn into_iter(mut self) -> Self::IntoIter {
// FIXME this may result in a memcpy at runtime
let lm = mem::replace(&mut self.0, unsafe { MaybeUninit::uninit().assume_init() });
mem::forget(self);
Self::IntoIter {
inner: crate::Vec(lm.buffer).into_iter(),
}
}
}
impl<'a, K, V, N> IntoIterator for &'a LinearMap<K, V, N>
where
N: ArrayLength<(K, V)>,
K: Eq,
{
type Item = (&'a K, &'a V);
@ -522,13 +461,12 @@ impl<'a, K, V> Clone for Iter<'a, K, V> {
}
}
impl<K, V, N> Drop for LinearMap<K, V, N>
where
N: ArrayLength<(K, V)>,
K: Eq,
{
impl<K, V, const N: usize> Drop for LinearMap<K, V, N> {
fn drop(&mut self) {
unsafe { ptr::drop_in_place(self.0.buffer.as_mut_slice()) }
// Nothing to do explicitly: `self.buffer` is a `heapless::Vec`, whose own
// `Drop` impl drops the remaining `(K, V)` pairs.
}
}
@ -544,12 +482,10 @@ impl<'a, K, V> Iterator for IterMut<'a, K, V> {
}
}
impl<K, V, N, N2> PartialEq<LinearMap<K, V, N2>> for LinearMap<K, V, N>
impl<K, V, const N: usize, const N2: usize> PartialEq<LinearMap<K, V, N2>> for LinearMap<K, V, N>
where
K: Eq,
V: PartialEq,
N: ArrayLength<(K, V)>,
N2: ArrayLength<(K, V)>,
{
fn eq(&self, other: &LinearMap<K, V, N2>) -> bool {
self.len() == other.len()
@ -559,30 +495,29 @@ where
}
}
impl<K, V, N> Eq for LinearMap<K, V, N>
impl<K, V, const N: usize> Eq for LinearMap<K, V, N>
where
K: Eq,
V: PartialEq,
N: ArrayLength<(K, V)>,
{
}
#[cfg(test)]
mod test {
use crate::{consts::*, LinearMap};
use crate::LinearMap;
#[test]
fn static_new() {
static mut _L: LinearMap<i32, i32, U8> = LinearMap(crate::i::LinearMap::new());
static mut _L: LinearMap<i32, i32, 8> = LinearMap::new();
}
#[test]
fn partial_eq() {
{
let mut a = LinearMap::<_, _, U1>::new();
let mut a = LinearMap::<_, _, 1>::new();
a.insert("k1", "v1").unwrap();
let mut b = LinearMap::<_, _, U2>::new();
let mut b = LinearMap::<_, _, 2>::new();
b.insert("k1", "v1").unwrap();
assert!(a == b);
@ -593,15 +528,17 @@ mod test {
}
{
let mut a = LinearMap::<_, _, U2>::new();
let mut a = LinearMap::<_, _, 2>::new();
a.insert("k1", "v1").unwrap();
a.insert("k2", "v2").unwrap();
let mut b = LinearMap::<_, _, U2>::new();
let mut b = LinearMap::<_, _, 2>::new();
b.insert("k2", "v2").unwrap();
b.insert("k1", "v1").unwrap();
assert!(a == b);
}
}
// TODO: drop test
}

View File

@ -82,11 +82,13 @@
//!
//! [0]: http://www.1024cores.net/home/lock-free-algorithms/queues/bounded-mpmc-queue
use core::{
cell::UnsafeCell,
mem::MaybeUninit,
sync::atomic::{AtomicU8, Ordering},
};
use core::{cell::UnsafeCell, mem::MaybeUninit};
#[cfg(armv6m)]
use atomic_polyfill::{AtomicU8, Ordering};
#[cfg(not(armv6m))]
use core::sync::atomic::{AtomicU8, Ordering};
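// NOTE: `atomic-polyfill` exposes the same `AtomicU8`/`Ordering` API as
// `core::sync::atomic` but emulates the compare-and-swap operations that
// `armv6m` lacks using critical sections, which is what lets this queue work
// on `thumbv6m` targets.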
/// MPMC queue with a capacity for 2 elements
pub struct Q2<T> {

View File

@ -1,11 +1,13 @@
//! Stack based on LL/SC atomics
pub use core::ptr::NonNull as Ptr;
use core::{
cell::UnsafeCell,
ptr,
sync::atomic::{AtomicPtr, Ordering},
};
use core::{cell::UnsafeCell, ptr};
#[cfg(armv6m)]
use atomic_polyfill::{AtomicPtr, Ordering};
#[cfg(not(armv6m))]
use core::sync::atomic::{AtomicPtr, Ordering};
/// Unfortunate implementation detail required to use the
/// [`Pool.grow_exact`](struct.Pool.html#method.grow_exact) method

View File

@ -236,8 +236,6 @@ use core::{
ptr,
};
use as_slice::{AsMutSlice, AsSlice};
pub use stack::Node;
use stack::{Ptr, Stack};
@ -384,13 +382,13 @@ impl<T> Pool<T> {
/// memory block
pub fn grow_exact<A>(&self, memory: &'static mut MaybeUninit<A>) -> usize
where
A: AsMutSlice<Element = Node<T>>,
A: AsMut<[Node<T>]>,
{
if mem::size_of::<T>() == 0 {
return usize::max_value();
}
let nodes = unsafe { (*memory.as_mut_ptr()).as_mut_slice() };
let nodes = unsafe { (*memory.as_mut_ptr()).as_mut() };
let cap = nodes.len();
for p in nodes {
match () {
@ -441,23 +439,21 @@ unsafe impl<T, S> Sync for Box<T, S> where T: Sync {}
unsafe impl<T> stable_deref_trait::StableDeref for Box<T> {}
impl<A> AsSlice for Box<A>
impl<A, T> AsRef<[T]> for Box<A>
where
A: AsSlice,
A: AsRef<[T]>,
{
type Element = A::Element;
fn as_slice(&self) -> &[A::Element] {
self.deref().as_slice()
fn as_ref(&self) -> &[T] {
self.deref().as_ref()
}
}
impl<A> AsMutSlice for Box<A>
impl<A, T> AsMut<[T]> for Box<A>
where
A: AsMutSlice,
A: AsMut<[T]>,
{
fn as_mut_slice(&mut self) -> &mut [A::Element] {
self.deref_mut().as_mut_slice()
fn as_mut(&mut self) -> &mut [T] {
self.deref_mut().as_mut()
}
}

View File

@ -10,17 +10,17 @@ use core::{
ptr,
};
use as_slice::{AsMutSlice, AsSlice};
use super::{Init, Node, Uninit};
/// Instantiates a pool as a global singleton
// NOTE(any(test)) makes testing easier (no need to enable Cargo features for testing)
#[cfg(any(
armv7a,
armv7r,
armv7m,
armv8m_main,
all(target_arch = "x86_64", feature = "x86-sync-pool"),
test
))]
#[macro_export]
macro_rules! pool {
@ -78,7 +78,7 @@ pub trait Pool {
/// memory block
fn grow_exact<A>(memory: &'static mut MaybeUninit<A>) -> usize
where
A: AsMutSlice<Element = Node<Self::Data>>,
A: AsMut<[Node<Self::Data>]>,
{
Self::ptr().grow_exact(memory)
}
@ -121,7 +121,7 @@ where
impl<P> Box<P, Uninit>
where
P: Pool,
P::Data: AsSlice<Element = u8>,
P::Data: AsRef<[u8]>,
{
/// Freezes the contents of this memory block
///
@ -244,25 +244,23 @@ where
{
}
impl<P, T> AsSlice for Box<P>
impl<P, T> AsRef<[T]> for Box<P>
where
P: Pool,
P::Data: AsSlice<Element = T>,
P::Data: AsRef<[T]>,
{
type Element = T;
fn as_slice(&self) -> &[T] {
self.deref().as_slice()
fn as_ref(&self) -> &[T] {
self.deref().as_ref()
}
}
impl<P, T> AsMutSlice for Box<P>
impl<P, T> AsMut<[T]> for Box<P>
where
P: Pool,
P::Data: AsMutSlice<Element = T>,
P::Data: AsMut<[T]>,
{
fn as_mut_slice(&mut self) -> &mut [T] {
self.deref_mut().as_mut_slice()
fn as_mut(&mut self) -> &mut [T] {
self.deref_mut().as_mut()
}
}

View File

@ -1,28 +1,7 @@
/// Sealed traits and implementations for `spsc`
pub mod spsc {
#[cfg(has_atomics)]
use crate::spsc::{MultiCore, SingleCore};
#[cfg(has_atomics)]
use core::sync::atomic::{self, AtomicU16, AtomicU8, AtomicUsize, Ordering};
pub unsafe trait XCore {
fn is_multi_core() -> bool;
}
#[cfg(has_atomics)]
unsafe impl XCore for SingleCore {
fn is_multi_core() -> bool {
false
}
}
#[cfg(has_atomics)]
unsafe impl XCore for MultiCore {
fn is_multi_core() -> bool {
true
}
}
use core::sync::atomic::{AtomicU16, AtomicU8, AtomicUsize, Ordering};
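// NOTE: with `MultiCore`/`SingleCore` removed, every index access now goes
// through real `Acquire`/`Release` atomics; the old compiler-fence-only
// single-core fast path is gone along with the `XCore` trait.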
pub unsafe trait Uxx: Into<usize> + Send {
#[doc(hidden)]
@ -33,9 +12,7 @@ pub mod spsc {
#[cfg(has_atomics)]
#[doc(hidden)]
unsafe fn load_acquire<C>(x: *const Self) -> Self
where
C: XCore;
unsafe fn load_acquire(x: *const Self) -> Self;
#[cfg(has_atomics)]
#[doc(hidden)]
@ -43,9 +20,7 @@ pub mod spsc {
#[cfg(has_atomics)]
#[doc(hidden)]
unsafe fn store_release<C>(x: *const Self, val: Self)
where
C: XCore;
unsafe fn store_release(x: *const Self, val: Self);
}
unsafe impl Uxx for u8 {
@ -63,17 +38,8 @@ pub mod spsc {
}
#[cfg(has_atomics)]
unsafe fn load_acquire<C>(x: *const Self) -> Self
where
C: XCore,
{
if C::is_multi_core() {
(*(x as *const AtomicU8)).load(Ordering::Acquire)
} else {
let y = (*(x as *const AtomicU8)).load(Ordering::Relaxed); // read
atomic::compiler_fence(Ordering::Acquire); // ▼
y
}
unsafe fn load_acquire(x: *const Self) -> Self {
(*(x as *const AtomicU8)).load(Ordering::Acquire)
}
#[cfg(has_atomics)]
@ -82,16 +48,8 @@ pub mod spsc {
}
#[cfg(has_atomics)]
unsafe fn store_release<C>(x: *const Self, val: Self)
where
C: XCore,
{
if C::is_multi_core() {
(*(x as *const AtomicU8)).store(val, Ordering::Release)
} else {
atomic::compiler_fence(Ordering::Release); // ▲
(*(x as *const AtomicU8)).store(val, Ordering::Relaxed) // write
}
unsafe fn store_release(x: *const Self, val: Self) {
(*(x as *const AtomicU8)).store(val, Ordering::Release)
}
}
@ -110,17 +68,8 @@ pub mod spsc {
}
#[cfg(has_atomics)]
unsafe fn load_acquire<C>(x: *const Self) -> Self
where
C: XCore,
{
if C::is_multi_core() {
(*(x as *const AtomicU16)).load(Ordering::Acquire)
} else {
let y = (*(x as *const AtomicU16)).load(Ordering::Relaxed); // read
atomic::compiler_fence(Ordering::Acquire); // ▼
y
}
unsafe fn load_acquire(x: *const Self) -> Self {
(*(x as *const AtomicU16)).load(Ordering::Acquire)
}
#[cfg(has_atomics)]
@ -129,16 +78,8 @@ pub mod spsc {
}
#[cfg(has_atomics)]
unsafe fn store_release<C>(x: *const Self, val: Self)
where
C: XCore,
{
if C::is_multi_core() {
(*(x as *const AtomicU16)).store(val, Ordering::Release)
} else {
atomic::compiler_fence(Ordering::Release); // ▲
(*(x as *const AtomicU16)).store(val, Ordering::Relaxed) // write
}
unsafe fn store_release(x: *const Self, val: Self) {
(*(x as *const AtomicU16)).store(val, Ordering::Release)
}
}
@ -152,17 +93,8 @@ pub mod spsc {
}
#[cfg(has_atomics)]
unsafe fn load_acquire<C>(x: *const Self) -> Self
where
C: XCore,
{
if C::is_multi_core() {
(*(x as *const AtomicUsize)).load(Ordering::Acquire)
} else {
let y = (*(x as *const AtomicUsize)).load(Ordering::Relaxed); // read
atomic::compiler_fence(Ordering::Acquire); // ▼
y
}
unsafe fn load_acquire(x: *const Self) -> Self {
(*(x as *const AtomicUsize)).load(Ordering::Acquire)
}
#[cfg(has_atomics)]
@ -171,16 +103,8 @@ pub mod spsc {
}
#[cfg(has_atomics)]
unsafe fn store_release<C>(x: *const Self, val: Self)
where
C: XCore,
{
if C::is_multi_core() {
(*(x as *const AtomicUsize)).store(val, Ordering::Release)
} else {
atomic::compiler_fence(Ordering::Release); // ▲
(*(x as *const AtomicUsize)).store(val, Ordering::Relaxed); // write
}
unsafe fn store_release(x: *const Self, val: Self) {
(*(x as *const AtomicUsize)).store(val, Ordering::Release)
}
}
}

View File

@ -1,19 +1,15 @@
use generic_array::{typenum::PowerOfTwo, ArrayLength};
use crate::{
sealed::binary_heap::Kind as BinaryHeapKind, BinaryHeap, IndexMap, IndexSet, LinearMap, String,
Vec,
};
use hash32::{BuildHasher, Hash};
use serde::ser::{Serialize, SerializeMap, SerializeSeq, Serializer};
use crate::{
indexmap::{Bucket, Pos},
sealed::binary_heap::Kind as BinaryHeapKind,
BinaryHeap, IndexMap, IndexSet, LinearMap, String, Vec,
};
// Sequential containers
impl<T, N, KIND> Serialize for BinaryHeap<T, N, KIND>
impl<T, KIND, const N: usize> Serialize for BinaryHeap<T, KIND, N>
where
T: Ord + Serialize,
N: ArrayLength<T>,
KIND: BinaryHeapKind,
{
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
@ -28,11 +24,10 @@ where
}
}
impl<T, N, S> Serialize for IndexSet<T, N, S>
impl<T, S, const N: usize> Serialize for IndexSet<T, S, N>
where
T: Eq + Hash + Serialize,
S: BuildHasher,
N: ArrayLength<Bucket<T, ()>> + ArrayLength<Option<Pos>> + PowerOfTwo,
{
fn serialize<SER>(&self, serializer: SER) -> Result<SER::Ok, SER::Error>
where
@ -46,10 +41,9 @@ where
}
}
impl<T, N> Serialize for Vec<T, N>
impl<T, const N: usize> Serialize for Vec<T, N>
where
T: Serialize,
N: ArrayLength<T>,
{
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
@ -65,10 +59,9 @@ where
// Dictionaries
impl<K, V, N, S> Serialize for IndexMap<K, V, N, S>
impl<K, V, S, const N: usize> Serialize for IndexMap<K, V, S, N>
where
K: Eq + Hash + Serialize,
N: ArrayLength<Bucket<K, V>> + ArrayLength<Option<Pos>>,
S: BuildHasher,
V: Serialize,
{
@ -84,9 +77,8 @@ where
}
}
impl<K, V, N> Serialize for LinearMap<K, V, N>
impl<K, V, const N: usize> Serialize for LinearMap<K, V, N>
where
N: ArrayLength<(K, V)>,
K: Eq + Serialize,
V: Serialize,
{
@ -104,10 +96,7 @@ where
// String containers
impl<N> Serialize for String<N>
where
N: ArrayLength<u8>,
{
impl<const N: usize> Serialize for String<N> {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,

897
src/spsc.rs Normal file
View File

@ -0,0 +1,897 @@
//! Fixed capacity Single Producer Single Consumer (SPSC) queue
//!
//! Implementation based on https://www.codeproject.com/Articles/43510/Lock-Free-Single-Producer-Single-Consumer-Circular
//!
//! NOTE: This module is not available on targets that do *not* support atomic loads, e.g. RISC-V
//! cores w/o the A (Atomic) extension
//!
//! # Examples
//!
//! - `Queue` can be used as a plain queue
//!
//! ```
//! use heapless::spsc::Queue;
//!
//! let mut rb: Queue<u8, 4> = Queue::new();
//!
//! assert!(rb.enqueue(0).is_ok());
//! assert!(rb.enqueue(1).is_ok());
//! assert!(rb.enqueue(2).is_ok());
//! assert!(rb.enqueue(3).is_err()); // full
//!
//! assert_eq!(rb.dequeue(), Some(0));
//! ```
//!
//! - `Queue` can be `split` and then be used in Single Producer Single Consumer mode
//!
//! ```
//! use heapless::spsc::Queue;
//!
//! // Note: the type signature needs to be explicit for now
//! // (`min_const_generics` does not allow defaults for const generic parameters)
//! static mut Q: Queue<Event, 4> = Queue::new();
//!
//! enum Event { A, B }
//!
//! fn main() {
//! // NOTE(unsafe) beware of aliasing the `consumer` end point
//! let mut consumer = unsafe { Q.split().1 };
//!
//! loop {
//! // `dequeue` is a lockless operation
//! match consumer.dequeue() {
//! Some(Event::A) => { /* .. */ },
//! Some(Event::B) => { /* .. */ },
//! None => { /* sleep */ },
//! }
//! # break
//! }
//! }
//!
//! // this is a different execution context that can preempt `main`
//! fn interrupt_handler() {
//! // NOTE(unsafe) beware of aliasing the `producer` end point
//! let mut producer = unsafe { Q.split().0 };
//! # let condition = true;
//!
//! // ..
//!
//! if condition {
//! producer.enqueue(Event::A).ok().unwrap();
//! } else {
//! producer.enqueue(Event::B).ok().unwrap();
//! }
//!
//! // ..
//! }
//! ```
//!
//! # Benchmarks
//!
//! Measured on a ARM Cortex-M3 core running at 8 MHz and with zero Flash wait cycles
//!
//! `-C opt-level` |`3`|
//! -----------------------|---|
//! `Consumer<u8>::dequeue`| 15|
//! `Queue<u8>::dequeue` | 12|
//! `Producer<u8>::enqueue`| 16|
//! `Queue<u8>::enqueue` | 14|
//!
//! - All execution times are in clock cycles. 1 clock cycle = 125 ns.
//! - Execution time is *dependent* of `mem::size_of::<T>()`. Both operations include one
//! `memcpy(T)` in their successful path.
//! - The optimization level is indicated in the first row.
//! - The numbers reported correspond to the successful path (i.e. `Some` is returned by `dequeue`
//! and `Ok` is returned by `enqueue`).
use core::{
cell::UnsafeCell,
fmt, hash,
mem::MaybeUninit,
ptr,
sync::atomic::{AtomicUsize, Ordering},
};
/// A statically allocated single producer single consumer queue with a capacity of `N - 1` elements
///
/// *IMPORTANT*: To get better performance use a capacity that is a power of 2 (e.g. `16`, `32`,
/// etc.).
pub struct Queue<T, const N: usize> {
// this is from where we dequeue items
pub(crate) head: AtomicUsize,
// this is where we enqueue new items
pub(crate) tail: AtomicUsize,
pub(crate) buffer: [UnsafeCell<MaybeUninit<T>>; N],
}
impl<T, const N: usize> Queue<T, N> {
const INIT: UnsafeCell<MaybeUninit<T>> = UnsafeCell::new(MaybeUninit::uninit());
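// NOTE: this associated `const` is what makes `[Self::INIT; N]` in `new`
// legal even though `UnsafeCell<MaybeUninit<T>>` is not `Copy`: array repeat
// expressions accept paths to `const` items.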
#[inline]
fn increment(val: usize) -> usize {
(val + 1) % N
}
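// NOTE: when `N` is a power of two the compiler can lower the `% N` above to
// a bitwise AND (effectively `(val + 1) & (N - 1)`), which is why the module
// docs recommend power-of-two capacities.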
/// Creates an empty queue with a fixed capacity of `N - 1`
pub const fn new() -> Self {
Queue {
head: AtomicUsize::new(0),
tail: AtomicUsize::new(0),
buffer: [Self::INIT; N],
}
}
/// Returns the maximum number of elements the queue can hold
#[inline]
pub const fn capacity(&self) -> usize {
N - 1
}
/// Returns the number of elements in the queue
#[inline]
pub fn len(&self) -> usize {
let current_head = self.head.load(Ordering::Relaxed);
let current_tail = self.tail.load(Ordering::Relaxed);
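// Worked example of the wraparound arithmetic: with N = 4, head = 3 and
// tail = 1 (the tail has wrapped), 1 - 3 underflows, adding 4 wraps it
// back, and (1 - 3 + 4) % 4 == 2 elements are in the queue.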
current_tail.wrapping_sub(current_head).wrapping_add(N) % N
}
/// Returns `true` if the queue is empty
#[inline]
pub fn is_empty(&self) -> bool {
self.head.load(Ordering::Relaxed) == self.tail.load(Ordering::Relaxed)
}
/// Returns `true` if the queue is full
#[inline]
pub fn is_full(&self) -> bool {
Self::increment(self.tail.load(Ordering::Relaxed)) == self.head.load(Ordering::Relaxed)
}
/// Iterates from the front of the queue to the back
pub fn iter(&self) -> Iter<'_, T, N> {
Iter {
rb: self,
index: 0,
len: self.len(),
}
}
/// Returns an iterator that allows modifying each value
pub fn iter_mut(&mut self) -> IterMut<'_, T, N> {
let len = self.len();
IterMut {
rb: self,
index: 0,
len,
}
}
/// Adds an `item` to the end of the queue
///
/// Returns back the `item` if the queue is full
#[inline]
pub fn enqueue(&mut self, val: T) -> Result<(), T> {
unsafe { self.inner_enqueue(val) }
}
/// Returns the item in the front of the queue, or `None` if the queue is empty
#[inline]
pub fn dequeue(&mut self) -> Option<T> {
unsafe { self.inner_dequeue() }
}
/// Returns a reference to the item in the front of the queue without dequeuing, or
/// `None` if the queue is empty.
///
/// # Examples
/// ```
/// use heapless::spsc::Queue;
///
/// let mut queue: Queue<u8, 235> = Queue::new();
/// let (mut producer, mut consumer) = queue.split();
/// assert_eq!(None, consumer.peek());
/// producer.enqueue(1);
/// assert_eq!(Some(&1), consumer.peek());
/// assert_eq!(Some(1), consumer.dequeue());
/// assert_eq!(None, consumer.peek());
/// ```
pub fn peek(&self) -> Option<&T> {
if !self.is_empty() {
let head = self.head.load(Ordering::Relaxed);
Some(unsafe { &*(self.buffer.get_unchecked(head).get() as *const T) })
} else {
None
}
}
// The memory for enqueueing is "owned" by the tail pointer.
// NOTE: This internal function uses internal mutability to allow the [`Producer`] to enqueue
// items without doing pointer arithmetic and accessing internal fields of this type.
unsafe fn inner_enqueue(&self, val: T) -> Result<(), T> {
let current_tail = self.tail.load(Ordering::Relaxed);
let next_tail = Self::increment(current_tail);
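// The `Acquire` load of `head` below pairs with the consumer's `Release`
// store in `inner_dequeue`, ensuring the consumer has finished reading the
// slot at `current_tail` before it is overwritten.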
if next_tail != self.head.load(Ordering::Acquire) {
(self.buffer.get_unchecked(current_tail).get()).write(MaybeUninit::new(val));
self.tail.store(next_tail, Ordering::Release);
Ok(())
} else {
Err(val)
}
}
// The memory for enqueueing is "owned" by the tail pointer.
// NOTE: This internal function uses internal mutability to allow the [`Producer`] to enqueue
// items without doing pointer arithmetic and accessing internal fields of this type.
unsafe fn inner_enqueue_unchecked(&self, val: T) {
let current_tail = self.tail.load(Ordering::Relaxed);
(self.buffer.get_unchecked(current_tail).get()).write(MaybeUninit::new(val));
self.tail
.store(Self::increment(current_tail), Ordering::Release);
}
/// Adds an `item` to the end of the queue, without checking if it's full
///
/// # Unsafety
///
/// If the queue is full this operation will leak a value (T's destructor won't run on
/// the value that got overwritten by `item`), *and* will allow the `dequeue` operation
/// to create a copy of `item`, which could result in `T`'s destructor running on `item`
/// twice.
pub unsafe fn enqueue_unchecked(&mut self, val: T) {
self.inner_enqueue_unchecked(val)
}
// The memory for dequeuing is "owned" by the head pointer.
// NOTE: This internal function uses internal mutability to allow the [`Consumer`] to dequeue
// items without doing pointer arithmetic and accessing internal fields of this type.
unsafe fn inner_dequeue(&self) -> Option<T> {
let current_head = self.head.load(Ordering::Relaxed);
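// The `Acquire` load of `tail` pairs with the producer's `Release` store
// in `inner_enqueue`, making the freshly written element visible to this
// read.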
if current_head == self.tail.load(Ordering::Acquire) {
None
} else {
let v = (self.buffer.get_unchecked(current_head).get() as *const T).read();
self.head
.store(Self::increment(current_head), Ordering::Release);
Some(v)
}
}
// The memory for dequeuing is "owned" by the head pointer.
// NOTE: This internal function uses internal mutability to allow the [`Consumer`] to dequeue
// items without doing pointer arithmetic and accessing internal fields of this type.
unsafe fn inner_dequeue_unchecked(&self) -> T {
let current_head = self.head.load(Ordering::Relaxed);
let v = (self.buffer.get_unchecked(current_head).get() as *const T).read();
self.head
.store(Self::increment(current_head), Ordering::Release);
v
}
/// Returns the item in the front of the queue, without checking if there is something in the
/// queue
///
/// # Unsafety
///
/// If the queue is empty this operation will return uninitialized memory.
pub unsafe fn dequeue_unchecked(&mut self) -> T {
self.inner_dequeue_unchecked()
}
/// Splits a queue into producer and consumer endpoints
pub fn split(&mut self) -> (Producer<'_, T, N>, Consumer<'_, T, N>) {
(Producer { rb: self }, Consumer { rb: self })
}
}
impl<T, const N: usize> Clone for Queue<T, N>
where
T: Clone,
{
fn clone(&self) -> Self {
let mut new: Queue<T, N> = Queue::new();
for s in self.iter() {
unsafe {
// NOTE(unsafe) new.capacity() == self.capacity() >= self.len()
// no overflow possible
new.enqueue_unchecked(s.clone());
}
}
new
}
}
impl<T, const N: usize, const N2: usize> PartialEq<Queue<T, N2>> for Queue<T, N>
where
T: PartialEq,
{
fn eq(&self, other: &Queue<T, N2>) -> bool {
self.len() == other.len() && self.iter().zip(other.iter()).all(|(v1, v2)| v1 == v2)
}
}
impl<T, const N: usize> Eq for Queue<T, N> where T: Eq {}
/// An iterator over the items of a queue
pub struct Iter<'a, T, const N: usize> {
rb: &'a Queue<T, N>,
index: usize,
len: usize,
}
impl<'a, T, const N: usize> Clone for Iter<'a, T, N> {
fn clone(&self) -> Self {
Self {
rb: self.rb,
index: self.index,
len: self.len,
}
}
}
/// A mutable iterator over the items of a queue
pub struct IterMut<'a, T, const N: usize> {
rb: &'a mut Queue<T, N>,
index: usize,
len: usize,
}
impl<'a, T, const N: usize> Iterator for Iter<'a, T, N> {
type Item = &'a T;
fn next(&mut self) -> Option<Self::Item> {
if self.index < self.len {
let head = self.rb.head.load(Ordering::Relaxed);
let i = (head + self.index) % N;
self.index += 1;
Some(unsafe { &*(self.rb.buffer.get_unchecked(i).get() as *const T) })
} else {
None
}
}
}
impl<'a, T, const N: usize> Iterator for IterMut<'a, T, N> {
type Item = &'a mut T;
fn next(&mut self) -> Option<Self::Item> {
if self.index < self.len {
let head = self.rb.head.load(Ordering::Relaxed);
let i = (head + self.index) % N;
self.index += 1;
Some(unsafe { &mut *(self.rb.buffer.get_unchecked(i).get() as *mut T) })
} else {
None
}
}
}
impl<'a, T, const N: usize> DoubleEndedIterator for Iter<'a, T, N> {
fn next_back(&mut self) -> Option<Self::Item> {
if self.index < self.len {
let head = self.rb.head.load(Ordering::Relaxed);
// self.len > 0 here, because self.index < self.len and self.index >= 0
let i = (head + self.len - 1) % N;
self.len -= 1;
Some(unsafe { &*(self.rb.buffer.get_unchecked(i).get() as *const T) })
} else {
None
}
}
}
impl<'a, T, const N: usize> DoubleEndedIterator for IterMut<'a, T, N> {
fn next_back(&mut self) -> Option<Self::Item> {
if self.index < self.len {
let head = self.rb.head.load(Ordering::Relaxed);
// self.len > 0 here, because self.index < self.len and self.index >= 0
let i = (head + self.len - 1) % N;
self.len -= 1;
Some(unsafe { &mut *(self.rb.buffer.get_unchecked(i).get() as *mut T) })
} else {
None
}
}
}
impl<T, const N: usize> Drop for Queue<T, N> {
fn drop(&mut self) {
for item in self {
unsafe {
ptr::drop_in_place(item);
}
}
}
}
impl<T, const N: usize> fmt::Debug for Queue<T, N>
where
T: fmt::Debug,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_list().entries(self.iter()).finish()
}
}
impl<T, const N: usize> hash::Hash for Queue<T, N>
where
T: hash::Hash,
{
fn hash<H: hash::Hasher>(&self, state: &mut H) {
// iterate over self in order
for t in self.iter() {
hash::Hash::hash(t, state);
}
}
}
impl<T, const N: usize> hash32::Hash for Queue<T, N>
where
T: hash32::Hash,
{
fn hash<H: hash32::Hasher>(&self, state: &mut H) {
// iterate over self in order
for t in self.iter() {
hash32::Hash::hash(t, state);
}
}
}
impl<'a, T, const N: usize> IntoIterator for &'a Queue<T, N> {
type Item = &'a T;
type IntoIter = Iter<'a, T, N>;
fn into_iter(self) -> Self::IntoIter {
self.iter()
}
}
impl<'a, T, const N: usize> IntoIterator for &'a mut Queue<T, N> {
type Item = &'a mut T;
type IntoIter = IterMut<'a, T, N>;
fn into_iter(self) -> Self::IntoIter {
self.iter_mut()
}
}
/// A queue "consumer"; it can dequeue items from the queue
/// NOTE the consumer semantically owns the `head` pointer of the queue
pub struct Consumer<'a, T, const N: usize> {
rb: &'a Queue<T, N>,
}
unsafe impl<'a, T, const N: usize> Send for Consumer<'a, T, N> where T: Send {}
/// A queue "producer"; it can enqueue items into the queue
/// NOTE the producer semantically owns the `tail` pointer of the queue
pub struct Producer<'a, T, const N: usize> {
rb: &'a Queue<T, N>,
}
unsafe impl<'a, T, const N: usize> Send for Producer<'a, T, N> where T: Send {}
impl<'a, T, const N: usize> Consumer<'a, T, N> {
/// Returns the item in the front of the queue, or `None` if the queue is empty
#[inline]
pub fn dequeue(&mut self) -> Option<T> {
unsafe { self.rb.inner_dequeue() }
}
/// Returns the item in the front of the queue, without checking if there are elements in the
/// queue
///
/// See [`Queue::dequeue_unchecked`] for safety
#[inline]
pub unsafe fn dequeue_unchecked(&mut self) -> T {
self.rb.inner_dequeue_unchecked()
}
/// Returns `true` if there are any items to dequeue. When this returns `true`, at least the
/// first subsequent dequeue will succeed.
#[inline]
pub fn ready(&self) -> bool {
!self.rb.is_empty()
}
/// Returns the number of elements in the queue
#[inline]
pub fn len(&self) -> usize {
self.rb.len()
}
/// Returns the maximum number of elements the queue can hold
#[inline]
pub fn capacity(&self) -> usize {
self.rb.capacity()
}
/// Returns the item in the front of the queue without dequeuing, or `None` if the queue is
/// empty
///
/// # Examples
/// ```
/// use heapless::spsc::Queue;
///
/// let mut queue: Queue<u8, 235> = Queue::new();
/// let (mut producer, mut consumer) = queue.split();
/// assert_eq!(None, consumer.peek());
/// producer.enqueue(1);
/// assert_eq!(Some(&1), consumer.peek());
/// assert_eq!(Some(1), consumer.dequeue());
/// assert_eq!(None, consumer.peek());
/// ```
#[inline]
pub fn peek(&self) -> Option<&T> {
self.rb.peek()
}
}
impl<'a, T, const N: usize> Producer<'a, T, N> {
/// Adds an `item` to the end of the queue, returns back the `item` if the queue is full
#[inline]
pub fn enqueue(&mut self, val: T) -> Result<(), T> {
unsafe { self.rb.inner_enqueue(val) }
}
/// Adds an `item` to the end of the queue, without checking if the queue is full
///
/// See [`Queue::enqueue_unchecked`] for safety
#[inline]
pub unsafe fn enqueue_unchecked(&mut self, val: T) {
self.rb.inner_enqueue_unchecked(val)
}
/// Returns `true` if there is any space to enqueue a new item. When this returns `true`, at
/// least the first subsequent enqueue will succeed.
#[inline]
pub fn ready(&self) -> bool {
!self.rb.is_full()
}
/// Returns the number of elements in the queue
#[inline]
pub fn len(&self) -> usize {
self.rb.len()
}
/// Returns the maximum number of elements the queue can hold
#[inline]
pub fn capacity(&self) -> usize {
self.rb.capacity()
}
}
#[cfg(test)]
mod tests {
use crate::spsc::Queue;
use hash32::Hasher;
#[test]
fn full() {
let mut rb: Queue<i32, 3> = Queue::new();
assert_eq!(rb.is_full(), false);
rb.enqueue(1).unwrap();
assert_eq!(rb.is_full(), false);
rb.enqueue(2).unwrap();
assert_eq!(rb.is_full(), true);
}
#[test]
fn empty() {
let mut rb: Queue<i32, 3> = Queue::new();
assert_eq!(rb.is_empty(), true);
rb.enqueue(1).unwrap();
assert_eq!(rb.is_empty(), false);
rb.enqueue(2).unwrap();
assert_eq!(rb.is_empty(), false);
}
#[test]
fn len() {
let mut rb: Queue<i32, 3> = Queue::new();
assert_eq!(rb.len(), 0);
rb.enqueue(1).unwrap();
assert_eq!(rb.len(), 1);
rb.enqueue(2).unwrap();
assert_eq!(rb.len(), 2);
for _ in 0..1_000_000 {
let v = rb.dequeue().unwrap();
println!("{}", v);
rb.enqueue(v).unwrap();
assert_eq!(rb.len(), 2);
}
}
#[test]
fn try_overflow() {
const N: usize = 23;
let mut rb: Queue<i32, N> = Queue::new();
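// with N = 23 the queue holds at most N - 1 = 22 elements, so exactly
// 22 enqueues fit before the queue reports full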
for i in 0..N as i32 - 1 {
rb.enqueue(i).unwrap();
}
for _ in 0..1_000_000 {
for i in 0..N as i32 - 1 {
let d = rb.dequeue().unwrap();
assert_eq!(d, i);
rb.enqueue(i).unwrap();
}
}
}
#[test]
fn sanity() {
let mut rb: Queue<i32, 10> = Queue::new();
let (mut p, mut c) = rb.split();
assert_eq!(p.ready(), true);
assert_eq!(c.ready(), false);
assert_eq!(c.dequeue(), None);
p.enqueue(0).unwrap();
assert_eq!(c.dequeue(), Some(0));
}
#[test]
fn static_new() {
static mut _Q: Queue<i32, 4> = Queue::new();
}
#[test]
fn drop() {
struct Droppable;
impl Droppable {
fn new() -> Self {
unsafe {
COUNT += 1;
}
Droppable
}
}
impl Drop for Droppable {
fn drop(&mut self) {
unsafe {
COUNT -= 1;
}
}
}
static mut COUNT: i32 = 0;
{
let mut v: Queue<Droppable, 4> = Queue::new();
v.enqueue(Droppable::new()).ok().unwrap();
v.enqueue(Droppable::new()).ok().unwrap();
v.dequeue().unwrap();
}
assert_eq!(unsafe { COUNT }, 0);
{
let mut v: Queue<Droppable, 4> = Queue::new();
v.enqueue(Droppable::new()).ok().unwrap();
v.enqueue(Droppable::new()).ok().unwrap();
}
assert_eq!(unsafe { COUNT }, 0);
}
#[test]
fn iter() {
let mut rb: Queue<i32, 4> = Queue::new();
rb.enqueue(0).unwrap();
rb.dequeue().unwrap();
rb.enqueue(1).unwrap();
rb.enqueue(2).unwrap();
rb.enqueue(3).unwrap();
let mut items = rb.iter();
// assert_eq!(items.next(), Some(&0));
assert_eq!(items.next(), Some(&1));
assert_eq!(items.next(), Some(&2));
assert_eq!(items.next(), Some(&3));
assert_eq!(items.next(), None);
}
#[test]
fn iter_double_ended() {
let mut rb: Queue<i32, 4> = Queue::new();
rb.enqueue(0).unwrap();
rb.enqueue(1).unwrap();
rb.enqueue(2).unwrap();
let mut items = rb.iter();
assert_eq!(items.next(), Some(&0));
assert_eq!(items.next_back(), Some(&2));
assert_eq!(items.next(), Some(&1));
assert_eq!(items.next(), None);
assert_eq!(items.next_back(), None);
}
#[test]
fn iter_mut() {
let mut rb: Queue<i32, 4> = Queue::new();
rb.enqueue(0).unwrap();
rb.enqueue(1).unwrap();
rb.enqueue(2).unwrap();
let mut items = rb.iter_mut();
assert_eq!(items.next(), Some(&mut 0));
assert_eq!(items.next(), Some(&mut 1));
assert_eq!(items.next(), Some(&mut 2));
assert_eq!(items.next(), None);
}
#[test]
fn iter_mut_double_ended() {
let mut rb: Queue<i32, 4> = Queue::new();
rb.enqueue(0).unwrap();
rb.enqueue(1).unwrap();
rb.enqueue(2).unwrap();
let mut items = rb.iter_mut();
assert_eq!(items.next(), Some(&mut 0));
assert_eq!(items.next_back(), Some(&mut 2));
assert_eq!(items.next(), Some(&mut 1));
assert_eq!(items.next(), None);
assert_eq!(items.next_back(), None);
}
#[test]
fn wrap_around() {
let mut rb: Queue<i32, 4> = Queue::new();
rb.enqueue(0).unwrap();
rb.enqueue(1).unwrap();
rb.enqueue(2).unwrap();
rb.dequeue().unwrap();
rb.dequeue().unwrap();
rb.dequeue().unwrap();
rb.enqueue(3).unwrap();
rb.enqueue(4).unwrap();
assert_eq!(rb.len(), 2);
}
#[test]
fn ready_flag() {
let mut rb: Queue<i32, 3> = Queue::new();
let (mut p, mut c) = rb.split();
assert_eq!(c.ready(), false);
assert_eq!(p.ready(), true);
p.enqueue(0).unwrap();
assert_eq!(c.ready(), true);
assert_eq!(p.ready(), true);
p.enqueue(1).unwrap();
assert_eq!(c.ready(), true);
assert_eq!(p.ready(), false);
c.dequeue().unwrap();
assert_eq!(c.ready(), true);
assert_eq!(p.ready(), true);
c.dequeue().unwrap();
assert_eq!(c.ready(), false);
assert_eq!(p.ready(), true);
}
#[test]
fn clone() {
let mut rb1: Queue<i32, 4> = Queue::new();
rb1.enqueue(0).unwrap();
rb1.enqueue(0).unwrap();
rb1.dequeue().unwrap();
rb1.enqueue(0).unwrap();
let rb2 = rb1.clone();
assert_eq!(rb1.capacity(), rb2.capacity());
assert_eq!(rb1.len(), rb2.len());
assert!(rb1.iter().zip(rb2.iter()).all(|(v1, v2)| v1 == v2));
}
#[test]
fn eq() {
// generate two queues with same content
// but different buffer alignment
let mut rb1: Queue<i32, 4> = Queue::new();
rb1.enqueue(0).unwrap();
rb1.enqueue(0).unwrap();
rb1.dequeue().unwrap();
rb1.enqueue(0).unwrap();
let mut rb2: Queue<i32, 4> = Queue::new();
rb2.enqueue(0).unwrap();
rb2.enqueue(0).unwrap();
assert!(rb1 == rb2);
// test for symmetry
assert!(rb2 == rb1);
// test for changes in content
rb1.enqueue(0).unwrap();
assert!(rb1 != rb2);
rb2.enqueue(1).unwrap();
assert!(rb1 != rb2);
// test for reflexive relation
assert!(rb1 == rb1);
assert!(rb2 == rb2);
}
#[test]
fn hash_equality() {
// generate two queues with same content
// but different buffer alignment
let rb1 = {
let mut rb1: Queue<i32, 4> = Queue::new();
rb1.enqueue(0).unwrap();
rb1.enqueue(0).unwrap();
rb1.dequeue().unwrap();
rb1.enqueue(0).unwrap();
rb1
};
let rb2 = {
let mut rb2: Queue<i32, 4> = Queue::new();
rb2.enqueue(0).unwrap();
rb2.enqueue(0).unwrap();
rb2
};
let hash1 = {
let mut hasher1 = hash32::FnvHasher::default();
hash32::Hash::hash(&rb1, &mut hasher1);
let hash1 = hasher1.finish();
hash1
};
let hash2 = {
let mut hasher2 = hash32::FnvHasher::default();
hash32::Hash::hash(&rb2, &mut hasher2);
let hash2 = hasher2.finish();
hash2
};
assert_eq!(hash1, hash2);
}
}

View File

@ -9,9 +9,8 @@
//!
//! ```
//! use heapless::spsc::Queue;
//! use heapless::consts::*;
//!
//! let mut rb: Queue<u8, U4> = Queue::new();
//! let mut rb: Queue<u8, _, 4> = Queue::new();
//!
//! assert!(rb.enqueue(0).is_ok());
//! assert!(rb.enqueue(1).is_ok());
@ -25,10 +24,11 @@
//! - `Queue` can be `split` and then be used in Single Producer Single Consumer mode
//!
//! ```
//! use heapless::spsc::Queue;
//! use heapless::consts::*;
//! use heapless::spsc::{Queue};
//!
//! static mut Q: Queue<Event, U4> = Queue(heapless::i::Queue::new());
//! // Note: the type signature needs to be explicit for now
//! // (`min_const_generics` does not allow defaults for const generic parameters)
//! static mut Q: Queue<Event, usize, 4> = Queue::new();
//!
//! enum Event { A, B }
//!
@ -83,9 +83,8 @@
//! - The numbers reported correspond to the successful path (i.e. `Some` is returned by `dequeue`
//! and `Ok` is returned by `enqueue`).
use core::{cell::UnsafeCell, fmt, hash, marker::PhantomData, mem::MaybeUninit, ptr};
use core::{cell::UnsafeCell, fmt, hash, mem::MaybeUninit, ptr};
use generic_array::{ArrayLength, GenericArray};
use hash32;
use crate::sealed::spsc as sealed;
@ -93,32 +92,23 @@ pub use split::{Consumer, Producer};
mod split;
/// Multi core synchronization - a memory barrier is used for synchronization
pub struct MultiCore;
/// Single core synchronization - no memory barrier synchronization, just a compiler fence
pub struct SingleCore;
// Atomic{U8,U16, Usize} with no CAS operations that works on targets that have "no atomic support"
// Atomic{U8, U16, Usize} with no CAS operations that works on targets that have "no atomic support"
// according to their specification
pub(crate) struct Atomic<U, C> {
pub(crate) struct Atomic<U> {
v: UnsafeCell<U>,
c: PhantomData<C>,
}
impl<U, C> Atomic<U, C> {
impl<U> Atomic<U> {
pub(crate) const fn new(v: U) -> Self {
Atomic {
v: UnsafeCell::new(v),
c: PhantomData,
}
}
}
impl<U, C> Atomic<U, C>
impl<U> Atomic<U>
where
U: sealed::Uxx,
C: sealed::XCore,
{
fn get(&self) -> &U {
unsafe { &*self.v.get() }
@ -129,7 +119,7 @@ where
}
fn load_acquire(&self) -> U {
unsafe { U::load_acquire::<C>(self.v.get()) }
unsafe { U::load_acquire(self.v.get()) }
}
fn load_relaxed(&self) -> U {
@ -137,13 +127,13 @@ where
}
fn store_release(&self, val: U) {
unsafe { U::store_release::<C>(self.v.get(), val) }
unsafe { U::store_release(self.v.get(), val) }
}
}
/// A statically allocated single producer single consumer queue with a capacity of `N` elements
///
/// *IMPORTANT*: To get better performance use a capacity that is a power of 2 (e.g. `U16`, `U32`,
/// *IMPORTANT*: To get better performance use a capacity that is a power of 2 (e.g. `16`, `32`,
/// etc.).
///
/// By default `spsc::Queue` will use `usize` integers to hold the indices to its head and tail. For
@ -154,30 +144,30 @@ where
/// [`u8`]: struct.Queue.html#method.u8
/// [`u16`]: struct.Queue.html#method.u16
///
/// *IMPORTANT*: `spsc::Queue<_, _, u8>` has a maximum capacity of 255 elements; `spsc::Queue<_, _,
/// u16>` has a maximum capacity of 65535 elements.
///
/// `spsc::Queue` also comes in a single core variant. This variant can be created using the
/// following constructors: `u8_sc`, `u16_sc`, `usize_sc` and `new_sc`. This variant is `unsafe` to
/// create because the programmer must make sure that the queue's consumer and producer endpoints
/// (if split) are kept on a single core for their entire lifetime.
pub struct Queue<T, N, U = usize, C = MultiCore>(
#[doc(hidden)] pub crate::i::Queue<GenericArray<T, N>, U, C>,
)
where
N: ArrayLength<T>,
U: sealed::Uxx,
C: sealed::XCore;
/// *IMPORTANT*: `spsc::Queue<_, u8, N>` has a maximum capacity of 255 elements; `spsc::Queue<_,
/// u16, N>` has a maximum capacity of 65535 elements.
impl<T, N, U, C> Queue<T, N, U, C>
#[cfg(has_atomics)]
pub struct Queue<T, U, const N: usize>
where
U: sealed::Uxx,
{
// this is from where we dequeue items
pub(crate) head: Atomic<U>,
// this is where we enqueue new items
pub(crate) tail: Atomic<U>,
pub(crate) buffer: MaybeUninit<[T; N]>,
}
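
As a usage sketch of the ported API (constructor names as they appear later in this diff; the element values are illustrative):

```rust
use heapless::spsc::Queue;

// `new` is a `const fn` in this port, so statics need no wrapper type
static mut Q: Queue<u8, usize, 16> = Queue::new();

fn demo() {
    // u8-indexed variant: `unsafe` because the caller must uphold N <= u8::MAX
    let mut small: Queue<u8, u8, 8> = unsafe { Queue::u8() };
    small.enqueue(42).unwrap();
    assert_eq!(small.dequeue(), Some(42));
}
```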
impl<T, U, const N: usize> Queue<T, U, N>
where
N: ArrayLength<T>,
U: sealed::Uxx,
C: sealed::XCore,
{
/// Returns the maximum number of elements the queue can hold
pub fn capacity(&self) -> U {
U::saturate(N::to_usize())
U::saturate(N)
}
/// Returns `true` if the queue has a length of 0
@ -186,7 +176,7 @@ where
}
/// Iterates from the front of the queue to the back
pub fn iter(&self) -> Iter<'_, T, N, U, C> {
pub fn iter(&self) -> Iter<'_, T, U, N> {
Iter {
rb: self,
index: 0,
@ -195,7 +185,7 @@ where
}
/// Returns an iterator that allows modifying each value.
pub fn iter_mut(&mut self) -> IterMut<'_, T, N, U, C> {
pub fn iter_mut(&mut self) -> IterMut<'_, T, U, N> {
let len = self.len_usize();
IterMut {
rb: self,
@ -205,18 +195,16 @@ where
}
fn len_usize(&self) -> usize {
let head = self.0.head.load_relaxed().into();
let tail = self.0.tail.load_relaxed().into();
let head = self.head.load_relaxed().into();
let tail = self.tail.load_relaxed().into();
U::truncate(tail.wrapping_sub(head)).into()
}
}
impl<T, N, U, C> Drop for Queue<T, N, U, C>
impl<T, U, const N: usize> Drop for Queue<T, U, N>
where
N: ArrayLength<T>,
U: sealed::Uxx,
C: sealed::XCore,
{
fn drop(&mut self) {
for item in self {
@ -227,24 +215,20 @@ where
}
}
impl<T, N, U, C> fmt::Debug for Queue<T, N, U, C>
impl<T, U, const N: usize> fmt::Debug for Queue<T, U, N>
where
N: ArrayLength<T>,
T: fmt::Debug,
U: sealed::Uxx,
C: sealed::XCore,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_list().entries(self.iter()).finish()
}
}
impl<T, N, U, C> hash::Hash for Queue<T, N, U, C>
impl<T, U, const N: usize> hash::Hash for Queue<T, U, N>
where
N: ArrayLength<T>,
T: hash::Hash,
U: sealed::Uxx,
C: sealed::XCore,
{
fn hash<H: hash::Hasher>(&self, state: &mut H) {
// iterate over self in order
@ -254,12 +238,10 @@ where
}
}
impl<T, N, U, C> hash32::Hash for Queue<T, N, U, C>
impl<T, U, const N: usize> hash32::Hash for Queue<T, U, N>
where
N: ArrayLength<T>,
T: hash32::Hash,
U: sealed::Uxx,
C: sealed::XCore,
{
fn hash<H: hash32::Hasher>(&self, state: &mut H) {
// iterate over self in order
@ -269,28 +251,24 @@ where
}
}
impl<'a, T, N, U, C> IntoIterator for &'a Queue<T, N, U, C>
impl<'a, T, U, const N: usize> IntoIterator for &'a Queue<T, U, N>
where
N: ArrayLength<T>,
U: sealed::Uxx,
C: sealed::XCore,
{
type Item = &'a T;
type IntoIter = Iter<'a, T, N, U, C>;
type IntoIter = Iter<'a, T, U, N>;
fn into_iter(self) -> Self::IntoIter {
self.iter()
}
}
impl<'a, T, N, U, C> IntoIterator for &'a mut Queue<T, N, U, C>
impl<'a, T, U, const N: usize> IntoIterator for &'a mut Queue<T, U, N>
where
N: ArrayLength<T>,
U: sealed::Uxx,
C: sealed::XCore,
{
type Item = &'a mut T;
type IntoIter = IterMut<'a, T, N, U, C>;
type IntoIter = IterMut<'a, T, U, N>;
fn into_iter(self) -> Self::IntoIter {
self.iter_mut()
@ -298,65 +276,27 @@ where
}
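
With both `IntoIterator` impls in place, a queue can be traversed (and mutated) with plain `for` loops; a minimal sketch:

```rust
use heapless::spsc::Queue;

fn main() {
    let mut q: Queue<i32, usize, 4> = Queue::new();
    q.enqueue(1).unwrap();
    q.enqueue(2).unwrap();

    for item in &mut q {
        *item += 10; // IterMut yields &mut i32, front to back
    }
    assert_eq!(q.dequeue(), Some(11));
}
```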
macro_rules! impl_ {
($uxx:ident, $uxx_sc:ident) => {
impl<T, N> Queue<T, N, $uxx, MultiCore>
where
N: ArrayLength<T>,
{
/// Creates an empty queue with a fixed capacity of `N`
pub fn $uxx() -> Self {
Queue(crate::i::Queue::$uxx())
}
}
impl<A> crate::i::Queue<A, $uxx, MultiCore> {
/// `spsc::Queue` `const` constructor; wrap the returned value in
/// [`spsc::Queue`](struct.Queue.html)
pub const fn $uxx() -> Self {
crate::i::Queue {
buffer: MaybeUninit::uninit(),
($uxx:ident, $doc:tt $(,$unsf:ident)?) => {
impl<T, const N: usize> Queue<T, $uxx, N> {
#[doc = $doc]
pub const $($unsf)* fn $uxx() -> Self {
Self {
head: Atomic::new(0),
tail: Atomic::new(0),
buffer: MaybeUninit::uninit(),
}
}
}
impl<T, N> Queue<T, N, $uxx, SingleCore>
where
N: ArrayLength<T>,
{
/// Creates an empty queue with a fixed capacity of `N` (single core variant)
pub unsafe fn $uxx_sc() -> Self {
Queue(crate::i::Queue::$uxx_sc())
}
}
impl<A> crate::i::Queue<A, $uxx, SingleCore> {
/// `spsc::Queue` `const` constructor; wrap the returned value in
/// [`spsc::Queue`](struct.Queue.html)
pub const unsafe fn $uxx_sc() -> Self {
crate::i::Queue {
buffer: MaybeUninit::uninit(),
head: Atomic::new(0),
tail: Atomic::new(0),
}
}
}
impl<T, N, C> Queue<T, N, $uxx, C>
where
N: ArrayLength<T>,
C: sealed::XCore,
{
impl<T, const N: usize> Queue<T, $uxx, N> {
/// Returns a reference to the item in the front of the queue without dequeuing, or
/// `None` if the queue is empty.
///
/// # Examples
/// ```
/// use heapless::spsc::Queue;
/// use heapless::consts::*;
///
/// let mut queue: Queue<u8, U235, _> = Queue::u8();
/// let mut queue: Queue<u8, _, 235> = Queue::new();
/// let (mut producer, mut consumer) = queue.split();
/// assert_eq!(None, consumer.peek());
/// producer.enqueue(1);
@ -367,10 +307,10 @@ macro_rules! impl_ {
pub fn peek(&self) -> Option<&T> {
let cap = self.capacity();
let head = self.0.head.get();
let tail = self.0.tail.get();
let head = self.head.get();
let tail = self.tail.get();
let p = self.0.buffer.as_ptr();
let p = self.buffer.as_ptr();
if *head != *tail {
let item = unsafe { &*(p as *const T).add(usize::from(*head % cap)) };
@ -384,10 +324,10 @@ macro_rules! impl_ {
pub fn dequeue(&mut self) -> Option<T> {
let cap = self.capacity();
let head = self.0.head.get_mut();
let tail = self.0.tail.get_mut();
let head = self.head.get_mut();
let tail = self.tail.get_mut();
let p = self.0.buffer.as_ptr();
let p = self.buffer.as_ptr();
if *head != *tail {
let item = unsafe { (p as *const T).add(usize::from(*head % cap)).read() };
@ -403,8 +343,8 @@ macro_rules! impl_ {
/// Returns back the `item` if the queue is full
pub fn enqueue(&mut self, item: T) -> Result<(), T> {
let cap = self.capacity();
let head = *self.0.head.get_mut();
let tail = *self.0.tail.get_mut();
let head = *self.head.get_mut();
let tail = *self.tail.get_mut();
if tail.wrapping_sub(head) > cap - 1 {
Err(item)
@ -424,12 +364,12 @@ macro_rules! impl_ {
/// twice.
pub unsafe fn enqueue_unchecked(&mut self, item: T) {
let cap = self.capacity();
let tail = self.0.tail.get_mut();
let tail = self.tail.get_mut();
// NOTE(ptr::write) the memory slot that we are about to write to is
// uninitialized. We use `ptr::write` to avoid running `T`'s destructor on the
// uninitialized memory
(self.0.buffer.as_mut_ptr() as *mut T)
(self.buffer.as_mut_ptr() as *mut T)
.add(usize::from(*tail % cap))
.write(item);
*tail = tail.wrapping_add(1);
@ -437,24 +377,22 @@ macro_rules! impl_ {
/// Returns the number of elements in the queue
pub fn len(&self) -> $uxx {
let head = self.0.head.load_relaxed();
let tail = self.0.tail.load_relaxed();
let head = self.head.load_relaxed();
let tail = self.tail.load_relaxed();
tail.wrapping_sub(head)
}
}
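
The indices are never reset; correctness relies on `wrapping_sub` and the `% cap` slot mapping. A worked example with hypothetical values, showing why this survives integer overflow of a `u8` index:

```rust
fn main() {
    // u8 indices that have wrapped past 255, in a queue with cap = 4
    let head: u8 = 254;
    let tail: u8 = 2; // tail is 4 slots ahead of head, modulo 256

    // the length survives the overflow ...
    assert_eq!(tail.wrapping_sub(head), 4);
    // ... and the next dequeue still maps to the right buffer slot
    assert_eq!(head % 4, 2);
}
```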
impl<T, N, C> Clone for Queue<T, N, $uxx, C>
impl<T, const N: usize> Clone for Queue<T, $uxx, N>
where
T: Clone,
N: ArrayLength<T>,
C: sealed::XCore,
{
fn clone(&self) -> Self {
let mut new: Queue<T, N, $uxx, C> = Queue(crate::i::Queue {
let mut new: Queue<T, $uxx, N> = Queue {
buffer: MaybeUninit::uninit(),
head: Atomic::new(0),
tail: Atomic::new(0),
});
};
for s in self.iter() {
unsafe {
@ -469,88 +407,57 @@ macro_rules! impl_ {
};
}
impl<A> crate::i::Queue<A, usize, MultiCore> {
/// `spsc::Queue` `const` constructor; wrap the returned value in
/// [`spsc::Queue`](struct.Queue.html)
pub const fn new() -> Self {
crate::i::Queue::usize()
}
}
impl<T, N> Queue<T, N, usize, MultiCore>
where
N: ArrayLength<T>,
{
impl<T, const N: usize> Queue<T, usize, N> {
/// Alias for [`spsc::Queue::usize`](struct.Queue.html#method.usize)
pub fn new() -> Self {
Queue(crate::i::Queue::new())
pub const fn new() -> Self {
Queue::usize()
}
}
impl<A> crate::i::Queue<A, usize, SingleCore> {
/// `spsc::Queue` `const` constructor; wrap the returned value in
/// [`spsc::Queue`](struct.Queue.html)
pub const unsafe fn new_sc() -> Self {
crate::i::Queue::usize_sc()
}
}
impl_!(
u8,
"Creates an empty queue with a fixed capacity of `N`. **Safety**: Assumes `N <= u8::MAX`.",
unsafe
);
impl_!(
u16,
"Creates an empty queue with a fixed capacity of `N`. **Safety**: Assumes `N <= u16::MAX`.",
unsafe
);
impl_!(usize, "Creates an empty queue with a fixed capacity of `N`");
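
For reference, the `u8` invocation expands (inside the crate, where `Atomic` and `MaybeUninit` are in scope) to roughly this constructor sketch:

```rust
impl<T, const N: usize> Queue<T, u8, N> {
    /// Creates an empty queue with a fixed capacity of `N`.
    /// **Safety**: Assumes `N <= u8::MAX`.
    pub const unsafe fn u8() -> Self {
        Self {
            head: Atomic::new(0),
            tail: Atomic::new(0),
            buffer: MaybeUninit::uninit(),
        }
    }
    // ... peek/dequeue/enqueue/len follow from the rest of the macro body
}
```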
impl<T, N> Queue<T, N, usize, SingleCore>
where
N: ArrayLength<T>,
{
/// Alias for [`spsc::Queue::usize_sc`](struct.Queue.html#method.usize_sc)
pub unsafe fn new_sc() -> Self {
Queue(crate::i::Queue::new_sc())
}
}
impl_!(u8, u8_sc);
impl_!(u16, u16_sc);
impl_!(usize, usize_sc);
impl<T, N, U, C, N2, U2, C2> PartialEq<Queue<T, N2, U2, C2>> for Queue<T, N, U, C>
impl<T, U, U2, const N: usize, const N2: usize> PartialEq<Queue<T, U2, N2>> for Queue<T, U, N>
where
T: PartialEq,
N: ArrayLength<T>,
U: sealed::Uxx,
C: sealed::XCore,
N2: ArrayLength<T>,
U2: sealed::Uxx,
C2: sealed::XCore,
{
fn eq(&self, other: &Queue<T, N2, U2, C2>) -> bool {
fn eq(&self, other: &Queue<T, U2, N2>) -> bool {
self.len_usize() == other.len_usize()
&& self.iter().zip(other.iter()).all(|(v1, v2)| v1 == v2)
}
}
impl<T, N, U, C> Eq for Queue<T, N, U, C>
impl<T, U, const N: usize> Eq for Queue<T, U, N>
where
T: Eq,
N: ArrayLength<T>,
U: sealed::Uxx,
C: sealed::XCore,
{
}
/// An iterator over the items of a queue
pub struct Iter<'a, T, N, U, C>
pub struct Iter<'a, T, U, const N: usize>
where
N: ArrayLength<T>,
U: sealed::Uxx,
C: sealed::XCore,
{
rb: &'a Queue<T, N, U, C>,
rb: &'a Queue<T, U, N>,
index: usize,
len: usize,
}
impl<'a, T, N, U, C> Clone for Iter<'a, T, N, U, C>
impl<'a, T, U, const N: usize> Clone for Iter<'a, T, U, N>
where
N: ArrayLength<T>,
U: sealed::Uxx,
C: sealed::XCore,
{
fn clone(&self) -> Self {
Self {
@ -562,33 +469,29 @@ where
}
/// A mutable iterator over the items of a queue
pub struct IterMut<'a, T, N, U, C>
pub struct IterMut<'a, T, U, const N: usize>
where
N: ArrayLength<T>,
U: sealed::Uxx,
C: sealed::XCore,
{
rb: &'a mut Queue<T, N, U, C>,
rb: &'a mut Queue<T, U, N>,
index: usize,
len: usize,
}
macro_rules! iterator {
(struct $name:ident -> $elem:ty, $ptr:ty, $asptr:ident, $mkref:ident) => {
impl<'a, T, N, U, C> Iterator for $name<'a, T, N, U, C>
impl<'a, T, U, const N: usize> Iterator for $name<'a, T, U, N>
where
N: ArrayLength<T>,
U: sealed::Uxx,
C: sealed::XCore,
{
type Item = $elem;
fn next(&mut self) -> Option<$elem> {
if self.index < self.len {
let head = self.rb.0.head.load_relaxed().into();
let head = self.rb.head.load_relaxed().into();
let cap = self.rb.capacity().into();
let ptr = self.rb.0.buffer.$asptr() as $ptr;
let ptr = self.rb.buffer.$asptr() as $ptr;
let i = (head + self.index) % cap;
self.index += 1;
Some(unsafe { $mkref!(*ptr.offset(i as isize)) })
@ -598,18 +501,16 @@ macro_rules! iterator {
}
}
impl<'a, T, N, U, C> DoubleEndedIterator for $name<'a, T, N, U, C>
impl<'a, T, U, const N: usize> DoubleEndedIterator for $name<'a, T, U, N>
where
N: ArrayLength<T>,
U: sealed::Uxx,
C: sealed::XCore,
{
fn next_back(&mut self) -> Option<$elem> {
if self.index < self.len {
let head = self.rb.0.head.load_relaxed().into();
let head = self.rb.head.load_relaxed().into();
let cap = self.rb.capacity().into();
let ptr = self.rb.0.buffer.$asptr() as $ptr;
let ptr = self.rb.buffer.$asptr() as $ptr;
// self.len > 0, since self.len > self.index >= 0
let i = (head + self.len - 1) % cap;
self.len -= 1;
@ -641,11 +542,16 @@ iterator!(struct IterMut -> &'a mut T, *mut T, as_mut_ptr, make_ref_mut);
mod tests {
use hash32::Hasher;
use crate::{consts::*, spsc::Queue};
use crate::spsc::Queue;
#[test]
fn static_usize() {
static mut _Q: Queue<i32, usize, 4> = Queue::usize();
}
#[test]
fn static_new() {
static mut _Q: Queue<i32, U4> = Queue(crate::i::Queue::new());
static mut _Q: Queue<i32, usize, 4> = Queue::new();
}
#[test]
@ -671,7 +577,7 @@ mod tests {
static mut COUNT: i32 = 0;
{
let mut v: Queue<Droppable, U4> = Queue::new();
let mut v: Queue<Droppable, _, 4> = Queue::new();
v.enqueue(Droppable::new()).ok().unwrap();
v.enqueue(Droppable::new()).ok().unwrap();
v.dequeue().unwrap();
@ -680,7 +586,7 @@ mod tests {
assert_eq!(unsafe { COUNT }, 0);
{
let mut v: Queue<Droppable, U4> = Queue::new();
let mut v: Queue<Droppable, _, 4> = Queue::new();
v.enqueue(Droppable::new()).ok().unwrap();
v.enqueue(Droppable::new()).ok().unwrap();
}
@ -690,7 +596,7 @@ mod tests {
#[test]
fn full() {
let mut rb: Queue<i32, U4> = Queue::new();
let mut rb: Queue<i32, _, 4> = Queue::new();
rb.enqueue(0).unwrap();
rb.enqueue(1).unwrap();
@ -702,7 +608,7 @@ mod tests {
#[test]
fn iter() {
let mut rb: Queue<i32, U4> = Queue::new();
let mut rb: Queue<i32, _, 4> = Queue::new();
rb.enqueue(0).unwrap();
rb.enqueue(1).unwrap();
@ -718,7 +624,7 @@ mod tests {
#[test]
fn iter_double_ended() {
let mut rb: Queue<i32, U4> = Queue::new();
let mut rb: Queue<i32, _, 4> = Queue::new();
rb.enqueue(0).unwrap();
rb.enqueue(1).unwrap();
@ -735,7 +641,7 @@ mod tests {
#[test]
fn iter_overflow() {
let mut rb: Queue<i32, U4, u8> = Queue::u8();
let mut rb: Queue<i32, u8, 4> = unsafe { Queue::u8() };
rb.enqueue(0).unwrap();
for _ in 0..300 {
@ -749,7 +655,7 @@ mod tests {
#[test]
fn iter_mut() {
let mut rb: Queue<i32, U4> = Queue::new();
let mut rb: Queue<i32, _, 4> = Queue::new();
rb.enqueue(0).unwrap();
rb.enqueue(1).unwrap();
@ -765,7 +671,7 @@ mod tests {
#[test]
fn iter_mut_double_ended() {
let mut rb: Queue<i32, U4> = Queue::new();
let mut rb: Queue<i32, _, 4> = Queue::new();
rb.enqueue(0).unwrap();
rb.enqueue(1).unwrap();
@ -782,21 +688,17 @@ mod tests {
#[test]
fn sanity() {
let mut rb: Queue<i32, U4> = Queue::new();
let mut rb: Queue<i32, _, 4> = Queue::new();
assert_eq!(rb.dequeue(), None);
rb.enqueue(0).unwrap();
assert_eq!(rb.dequeue(), Some(0));
assert_eq!(rb.dequeue(), None);
}
#[test]
#[cfg(feature = "smaller-atomics")]
fn u8() {
let mut rb: Queue<u8, U256, _> = Queue::u8();
let mut rb: Queue<u8, u8, 256> = unsafe { Queue::u8() };
for _ in 0..255 {
rb.enqueue(0).unwrap();
@ -807,7 +709,7 @@ mod tests {
#[test]
fn wrap_around() {
let mut rb: Queue<i32, U3> = Queue::new();
let mut rb: Queue<i32, _, 3> = Queue::new();
rb.enqueue(0).unwrap();
rb.enqueue(1).unwrap();
@ -823,7 +725,7 @@ mod tests {
#[test]
fn ready_flag() {
let mut rb: Queue<i32, U2> = Queue::new();
let mut rb: Queue<i32, _, 2> = Queue::new();
let (mut p, mut c) = rb.split();
assert_eq!(c.ready(), false);
assert_eq!(p.ready(), true);
@ -851,7 +753,7 @@ mod tests {
#[test]
fn clone() {
let mut rb1: Queue<i32, U4> = Queue::new();
let mut rb1: Queue<i32, _, 4> = Queue::new();
rb1.enqueue(0).unwrap();
rb1.enqueue(0).unwrap();
rb1.dequeue().unwrap();
@ -866,12 +768,12 @@ mod tests {
fn eq() {
// generate two queues with same content
// but different buffer alignment
let mut rb1: Queue<i32, U4> = Queue::new();
let mut rb1: Queue<i32, _, 4> = Queue::new();
rb1.enqueue(0).unwrap();
rb1.enqueue(0).unwrap();
rb1.dequeue().unwrap();
rb1.enqueue(0).unwrap();
let mut rb2: Queue<i32, U4> = Queue::new();
let mut rb2: Queue<i32, _, 4> = Queue::new();
rb2.enqueue(0).unwrap();
rb2.enqueue(0).unwrap();
assert!(rb1 == rb2);
@ -892,7 +794,7 @@ mod tests {
// generate two queues with same content
// but different buffer alignment
let rb1 = {
let mut rb1: Queue<i32, U4> = Queue::new();
let mut rb1: Queue<i32, _, 4> = Queue::new();
rb1.enqueue(0).unwrap();
rb1.enqueue(0).unwrap();
rb1.dequeue().unwrap();
@ -900,7 +802,7 @@ mod tests {
rb1
};
let rb2 = {
let mut rb2: Queue<i32, U4> = Queue::new();
let mut rb2: Queue<i32, _, 4> = Queue::new();
rb2.enqueue(0).unwrap();
rb2.enqueue(0).unwrap();
rb2

View File

@ -1,20 +1,13 @@
use core::{marker::PhantomData, ptr::NonNull};
use generic_array::ArrayLength;
use crate::{sealed::spsc as sealed, spsc::Queue};
use crate::{
sealed::spsc as sealed,
spsc::{MultiCore, Queue},
};
impl<T, N, U, C> Queue<T, N, U, C>
impl<T, U, const N: usize> Queue<T, U, N>
where
N: ArrayLength<T>,
U: sealed::Uxx,
C: sealed::XCore,
{
/// Splits a statically allocated queue into producer and consumer end points
pub fn split<'rb>(&'rb mut self) -> (Producer<'rb, T, N, U, C>, Consumer<'rb, T, N, U, C>) {
pub fn split<'rb>(&'rb mut self) -> (Producer<'rb, T, U, N>, Consumer<'rb, T, U, N>) {
(
Producer {
rb: unsafe { NonNull::new_unchecked(self) },
@ -30,58 +23,46 @@ where
/// A queue "consumer"; it can dequeue items from the queue
// NOTE the consumer semantically owns the `head` pointer of the queue
pub struct Consumer<'a, T, N, U = usize, C = MultiCore>
pub struct Consumer<'a, T, U, const N: usize>
where
N: ArrayLength<T>,
U: sealed::Uxx,
C: sealed::XCore,
{
rb: NonNull<Queue<T, N, U, C>>,
rb: NonNull<Queue<T, U, N>>,
_marker: PhantomData<&'a ()>,
}
unsafe impl<'a, T, N, U, C> Send for Consumer<'a, T, N, U, C>
unsafe impl<'a, T, U, const N: usize> Send for Consumer<'a, T, U, N>
where
N: ArrayLength<T>,
T: Send,
U: sealed::Uxx,
C: sealed::XCore,
{
}
/// A queue "producer"; it can enqueue items into the queue
// NOTE the producer semantically owns the `tail` pointer of the queue
pub struct Producer<'a, T, N, U = usize, C = MultiCore>
pub struct Producer<'a, T, U, const N: usize>
where
N: ArrayLength<T>,
U: sealed::Uxx,
C: sealed::XCore,
{
rb: NonNull<Queue<T, N, U, C>>,
rb: NonNull<Queue<T, U, N>>,
_marker: PhantomData<&'a ()>,
}
unsafe impl<'a, T, N, U, C> Send for Producer<'a, T, N, U, C>
unsafe impl<'a, T, U, const N: usize> Send for Producer<'a, T, U, N>
where
N: ArrayLength<T>,
T: Send,
U: sealed::Uxx,
C: sealed::XCore,
{
}
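
Because both endpoints are `Send` when `T: Send`, the canonical pattern is to `split` a `static` queue and hand one half to an interrupt handler. A minimal sketch (the `init` function is hypothetical):

```rust
use heapless::spsc::{Consumer, Producer, Queue};

static mut Q: Queue<u8, usize, 8> = Queue::new();

fn init() -> (Producer<'static, u8, usize, 8>, Consumer<'static, u8, usize, 8>) {
    // Safety: called once; afterwards the producer lives in the interrupt
    // handler and the consumer in the main loop, one owner per endpoint.
    unsafe { Q.split() }
}
```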
macro_rules! impl_ {
($uxx:ident) => {
impl<'a, T, N, C> Consumer<'a, T, N, $uxx, C>
where
N: ArrayLength<T>,
C: sealed::XCore,
{
impl<'a, T, const N: usize> Consumer<'a, T, $uxx, N> {
/// Returns if there are any items to dequeue. When this returns true, at least the
/// first subsequent dequeue will succeed.
pub fn ready(&self) -> bool {
let head = unsafe { self.rb.as_ref().0.head.load_relaxed() };
let tail = unsafe { self.rb.as_ref().0.tail.load_acquire() }; // ▼
let head = unsafe { self.rb.as_ref().head.load_relaxed() };
let tail = unsafe { self.rb.as_ref().tail.load_acquire() }; // ▼
return head != tail;
}
@ -90,9 +71,8 @@ macro_rules! impl_ {
/// # Examples
/// ```
/// use heapless::spsc::Queue;
/// use heapless::consts::*;
///
/// let mut queue: Queue<u8, U235, _> = Queue::u8();
/// let mut queue: Queue<u8, _, 235> = Queue::new();
/// let (mut producer, mut consumer) = queue.split();
/// assert_eq!(None, consumer.peek());
/// producer.enqueue(1);
@ -101,8 +81,8 @@ macro_rules! impl_ {
/// assert_eq!(None, consumer.peek());
/// ```
pub fn peek(&self) -> Option<&T> {
let head = unsafe { self.rb.as_ref().0.head.load_relaxed() };
let tail = unsafe { self.rb.as_ref().0.tail.load_acquire() };
let head = unsafe { self.rb.as_ref().head.load_relaxed() };
let tail = unsafe { self.rb.as_ref().tail.load_acquire() };
if head != tail {
Some(unsafe { self._peek(head) })
@ -113,8 +93,8 @@ macro_rules! impl_ {
/// Returns the item in the front of the queue, or `None` if the queue is empty
pub fn dequeue(&mut self) -> Option<T> {
let head = unsafe { self.rb.as_ref().0.head.load_relaxed() };
let tail = unsafe { self.rb.as_ref().0.tail.load_acquire() }; // ▼
let head = unsafe { self.rb.as_ref().head.load_relaxed() };
let tail = unsafe { self.rb.as_ref().tail.load_acquire() }; // ▼
if head != tail {
Some(unsafe { self._dequeue(head) }) // ▲
@ -129,8 +109,8 @@ macro_rules! impl_ {
///
/// If the queue is empty this is equivalent to calling `mem::uninitialized`
pub unsafe fn dequeue_unchecked(&mut self) -> T {
let head = self.rb.as_ref().0.head.load_relaxed();
debug_assert_ne!(head, self.rb.as_ref().0.tail.load_acquire());
let head = self.rb.as_ref().head.load_relaxed();
debug_assert_ne!(head, self.rb.as_ref().tail.load_acquire());
self._dequeue(head) // ▲
}
@ -146,8 +126,8 @@ macro_rules! impl_ {
/// This is a conservative estimate: an interrupt during this call
/// may mean the `Consumer` actually has more than N items available.
pub fn len(&self) -> $uxx {
let head = unsafe { self.rb.as_ref().0.head.load_relaxed() };
let tail = unsafe { self.rb.as_ref().0.tail.load_acquire() };
let head = unsafe { self.rb.as_ref().head.load_relaxed() };
let tail = unsafe { self.rb.as_ref().tail.load_acquire() };
tail.wrapping_sub(head)
}
@ -156,7 +136,7 @@ macro_rules! impl_ {
let cap = rb.capacity();
let item = (rb.0.buffer.as_ptr() as *const T).add(usize::from(head % cap));
let item = (rb.buffer.as_ptr() as *const T).add(usize::from(head % cap));
&*item
}
@ -165,31 +145,27 @@ macro_rules! impl_ {
let cap = rb.capacity();
let item = (rb.0.buffer.as_ptr() as *const T)
let item = (rb.buffer.as_ptr() as *const T)
.add(usize::from(head % cap))
.read();
rb.0.head.store_release(head.wrapping_add(1)); // ▲
rb.head.store_release(head.wrapping_add(1)); // ▲
item
}
}
impl<'a, T, N, C> Producer<'a, T, N, $uxx, C>
where
N: ArrayLength<T>,
C: sealed::XCore,
{
impl<'a, T, const N: usize> Producer<'a, T, $uxx, N> {
/// Returns if there is any space to enqueue a new item. When this returns true, at
/// least the first subsequent enqueue will succeed.
pub fn ready(&self) -> bool {
let cap = unsafe { self.rb.as_ref().capacity() };
let tail = unsafe { self.rb.as_ref().0.tail.load_relaxed() };
let tail = unsafe { self.rb.as_ref().tail.load_relaxed() };
// NOTE we could replace this `load_acquire` with a `load_relaxed` and this method
// would be sound on most architectures but that change would result in UB according
// to the C++ memory model, which is what Rust currently uses, so we err on the side
// of caution and stick to `load_acquire`. Check issue google#sanitizers#882 for
// more details.
let head = unsafe { self.rb.as_ref().0.head.load_acquire() };
let head = unsafe { self.rb.as_ref().head.load_acquire() };
return head.wrapping_add(cap) != tail;
}
@ -198,13 +174,13 @@ macro_rules! impl_ {
/// Returns back the `item` if the queue is full
pub fn enqueue(&mut self, item: T) -> Result<(), T> {
let cap = unsafe { self.rb.as_ref().capacity() };
let tail = unsafe { self.rb.as_ref().0.tail.load_relaxed() };
let tail = unsafe { self.rb.as_ref().tail.load_relaxed() };
// NOTE we could replace this `load_acquire` with a `load_relaxed` and this method
// would be sound on most architectures but that change would result in UB according
// to the C++ memory model, which is what Rust currently uses, so we err on the side
// of caution and stick to `load_acquire`. Check issue google#sanitizers#882 for
// more details.
let head = unsafe { self.rb.as_ref().0.head.load_acquire() }; // ▼
let head = unsafe { self.rb.as_ref().head.load_acquire() }; // ▼
if tail.wrapping_sub(head) > cap - 1 {
Err(item)
@ -226,8 +202,8 @@ macro_rules! impl_ {
/// This is a conservative estimate: an interrupt during this call
/// may mean the `Producer` actually has more than N items of space available.
pub fn len(&self) -> $uxx {
let head = unsafe { self.rb.as_ref().0.head.load_acquire() };
let tail = unsafe { self.rb.as_ref().0.tail.load_relaxed() };
let head = unsafe { self.rb.as_ref().head.load_acquire() };
let tail = unsafe { self.rb.as_ref().tail.load_relaxed() };
tail.wrapping_sub(head)
}
@ -240,8 +216,8 @@ macro_rules! impl_ {
/// to create a copy of `item`, which could result in `T`'s destructor running on `item`
/// twice.
pub unsafe fn enqueue_unchecked(&mut self, item: T) {
let tail = self.rb.as_ref().0.tail.load_relaxed();
debug_assert_ne!(tail.wrapping_add(1), self.rb.as_ref().0.head.load_acquire());
let tail = self.rb.as_ref().tail.load_relaxed();
debug_assert_ne!(tail.wrapping_add(1), self.rb.as_ref().head.load_acquire());
self._enqueue(tail, item); // ▲
}
@ -253,10 +229,10 @@ macro_rules! impl_ {
// NOTE(ptr::write) the memory slot that we are about to write to is
// uninitialized. We use `ptr::write` to avoid running `T`'s destructor on the
// uninitialized memory
(rb.0.buffer.as_mut_ptr() as *mut T)
(rb.buffer.as_mut_ptr() as *mut T)
.add(usize::from(tail % cap))
.write(item);
rb.0.tail.store_release(tail.wrapping_add(1)); // ▲
rb.tail.store_release(tail.wrapping_add(1)); // ▲
}
}
};
@ -268,11 +244,11 @@ impl_!(usize);
#[cfg(test)]
mod tests {
use crate::{consts::*, spsc::Queue};
use crate::spsc::Queue;
#[test]
fn sanity() {
let mut rb: Queue<i32, U2> = Queue::new();
let mut rb: Queue<i32, _, 2> = Queue::new();
let (mut p, mut c) = rb.split();

View File

@ -1,38 +1,15 @@
use core::{
fmt,
fmt::Write,
hash,
mem::{self, MaybeUninit},
ops, str,
str::Utf8Error,
};
use core::{fmt, fmt::Write, hash, ops, str};
use generic_array::{
typenum::{consts::*, IsGreaterOrEqual},
ArrayLength, GenericArray,
};
use hash32;
use crate::Vec;
/// A fixed capacity [`String`](https://doc.rust-lang.org/std/string/struct.String.html)
pub struct String<N>(#[doc(hidden)] pub crate::i::String<GenericArray<u8, N>>)
where
N: ArrayLength<u8>;
impl<A> crate::i::String<A> {
/// `String` `const` constructor; wrap the returned value in [`String`](../struct.String.html)
pub const fn new() -> Self {
Self {
vec: crate::i::Vec::new(),
}
}
pub struct String<const N: usize> {
vec: Vec<u8, N>,
}
impl<N> String<N>
where
N: ArrayLength<u8>,
{
impl<const N: usize> String<N> {
/// Constructs a new, empty `String` with a fixed capacity of `N`
///
/// # Examples
@ -41,78 +18,16 @@ where
///
/// ```
/// use heapless::String;
/// use heapless::consts::*;
///
/// // allocate the string on the stack
/// let mut s: String<U4> = String::new();
/// let mut s: String<4> = String::new();
///
/// // allocate the string in a static variable
/// static mut S: String<U4> = String(heapless::i::String::new());
/// static mut S: String<4> = String::new();
/// ```
#[inline]
pub fn new() -> Self {
String(crate::i::String::new())
}
/// Converts a vector of bytes into a `String`.
///
/// A string slice ([`&str`]) is made of bytes ([`u8`]), and a vector of bytes
/// ([`Vec<u8>`]) is made of bytes, so this function converts between the
/// two. Not all byte slices are valid `String`s, however: `String`
/// requires that it is valid UTF-8. `from_utf8()` checks to ensure that
/// the bytes are valid UTF-8, and then does the conversion.
///
/// See std::String for further information.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// use heapless::{String, Vec};
/// use heapless::consts::*;
///
/// let mut v: Vec<u8, U8> = Vec::new();
/// v.push('a' as u8).unwrap();
/// v.push('b' as u8).unwrap();
///
/// let s = String::from_utf8(v).unwrap();
/// assert!(s.len() == 2);
/// ```
///
/// Incorrect bytes:
///
/// ```
/// use heapless::{String, Vec};
/// use heapless::consts::*;
///
/// // some invalid bytes, in a vector
///
/// let mut v: Vec<u8, U8> = Vec::new();
/// v.push(0).unwrap();
/// v.push(159).unwrap();
/// v.push(146).unwrap();
/// v.push(150).unwrap();
/// assert!(String::from_utf8(v).is_err());
/// ```
#[inline]
pub fn from_utf8(vec: Vec<u8, N>) -> Result<String<N>, Utf8Error> {
// validate input
str::from_utf8(&*vec)?;
Ok(unsafe { String::from_utf8_unchecked(vec) })
}
/// Converts a vector of bytes to a `String` without checking that the
/// string contains valid UTF-8.
///
/// See the safe version, `from_utf8`, for more details.
#[inline]
pub unsafe fn from_utf8_unchecked(mut vec: Vec<u8, N>) -> String<N> {
// FIXME this may result in a memcpy at runtime
let vec_ = mem::replace(&mut vec.0, MaybeUninit::uninit().assume_init());
mem::forget(vec);
String(crate::i::String { vec: vec_ })
pub const fn new() -> Self {
Self { vec: Vec::new() }
}
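
With the `from_utf8` family removed, the same conversion goes through `str` plus the `From<&str>` impl further down; a sketch:

```rust
use heapless::{String, Vec};

fn main() {
    let mut v: Vec<u8, 8> = Vec::new();
    v.extend_from_slice(b"ab").unwrap();

    // validate the bytes as UTF-8, then copy them into a heapless String
    let s: String<8> = core::str::from_utf8(&v).map(String::from).unwrap();
    assert_eq!(s, "ab");
}
```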
/// Converts a `String` into a byte vector.
@ -125,9 +40,8 @@ where
///
/// ```
/// use heapless::String;
/// use heapless::consts::*;
///
/// let s: String<U4> = String::from("ab");
/// let s: String<4> = String::from("ab");
/// let b = s.into_bytes();
/// assert!(b.len() == 2);
///
@ -135,7 +49,7 @@ where
/// ```
#[inline]
pub fn into_bytes(self) -> Vec<u8, N> {
Vec(self.0.vec)
self.vec
}
/// Extracts a string slice containing the entire string.
@ -146,9 +60,8 @@ where
///
/// ```
/// use heapless::String;
/// use heapless::consts::*;
///
/// let mut s: String<U4> = String::from("ab");
/// let mut s: String<4> = String::from("ab");
/// assert!(s.as_str() == "ab");
///
/// let _s = s.as_str();
@ -156,7 +69,7 @@ where
/// ```
#[inline]
pub fn as_str(&self) -> &str {
unsafe { str::from_utf8_unchecked(self.0.vec.as_slice()) }
unsafe { str::from_utf8_unchecked(self.vec.as_slice()) }
}
/// Converts a `String` into a mutable string slice.
@ -167,15 +80,14 @@ where
///
/// ```
/// use heapless::String;
/// use heapless::consts::*;
///
/// let mut s: String<U4> = String::from("ab");
/// let mut s: String<4> = String::from("ab");
/// let s = s.as_mut_str();
/// s.make_ascii_uppercase();
/// ```
#[inline]
pub fn as_mut_str(&mut self) -> &mut str {
unsafe { str::from_utf8_unchecked_mut(self.0.vec.as_mut_slice()) }
unsafe { str::from_utf8_unchecked_mut(self.vec.as_mut_slice()) }
}
/// Returns a mutable reference to the contents of this `String`.
@ -203,7 +115,7 @@ where
/// assert_eq!(s, "olleh");
/// ```
pub unsafe fn as_mut_vec(&mut self) -> &mut Vec<u8, N> {
&mut *(&mut self.0.vec as *mut crate::i::Vec<GenericArray<u8, N>> as *mut Vec<u8, N>)
&mut self.vec
}
/// Appends a given string slice onto the end of this `String`.
@ -214,9 +126,8 @@ where
///
/// ```
/// use heapless::String;
/// use heapless::consts::*;
///
/// let mut s: String<U8> = String::from("foo");
/// let mut s: String<8> = String::from("foo");
///
/// assert!(s.push_str("bar").is_ok());
///
@ -226,7 +137,7 @@ where
/// ```
#[inline]
pub fn push_str(&mut self, string: &str) -> Result<(), ()> {
self.0.vec.extend_from_slice(string.as_bytes())
self.vec.extend_from_slice(string.as_bytes())
}
/// Returns the maximum number of elements the String can hold
@ -237,14 +148,13 @@ where
///
/// ```
/// use heapless::String;
/// use heapless::consts::*;
///
/// let mut s: String<U4> = String::new();
/// let mut s: String<4> = String::new();
/// assert!(s.capacity() == 4);
/// ```
#[inline]
pub fn capacity(&self) -> usize {
self.0.vec.capacity()
self.vec.capacity()
}
/// Appends the given [`char`] to the end of this `String`.
@ -257,9 +167,8 @@ where
///
/// ```
/// use heapless::String;
/// use heapless::consts::*;
///
/// let mut s: String<U8> = String::from("abc");
/// let mut s: String<8> = String::from("abc");
///
/// s.push('1').unwrap();
/// s.push('2').unwrap();
@ -272,9 +181,8 @@ where
#[inline]
pub fn push(&mut self, c: char) -> Result<(), ()> {
match c.len_utf8() {
1 => self.0.vec.push(c as u8).map_err(|_| {}),
1 => self.vec.push(c as u8).map_err(|_| {}),
_ => self
.0
.vec
.extend_from_slice(c.encode_utf8(&mut [0; 4]).as_bytes()),
}
@ -300,9 +208,8 @@ where
///
/// ```
/// use heapless::String;
/// use heapless::consts::*;
///
/// let mut s: String<U8> = String::from("hello");
/// let mut s: String<8> = String::from("hello");
///
/// s.truncate(2);
///
@ -312,7 +219,7 @@ where
pub fn truncate(&mut self, new_len: usize) {
if new_len <= self.len() {
assert!(self.is_char_boundary(new_len));
self.0.vec.truncate(new_len)
self.vec.truncate(new_len)
}
}
@ -328,9 +235,8 @@ where
///
/// ```
/// use heapless::String;
/// use heapless::consts::*;
///
/// let mut s: String<U8> = String::from("foo");
/// let mut s: String<8> = String::from("foo");
///
/// assert_eq!(s.pop(), Some('o'));
/// assert_eq!(s.pop(), Some('o'));
@ -344,7 +250,7 @@ where
// pop bytes that correspond to `ch`
for _ in 0..ch.len_utf8() {
unsafe {
self.0.vec.pop_unchecked();
self.vec.pop_unchecked();
}
}
@ -362,9 +268,8 @@ where
///
/// ```
/// use heapless::String;
/// use heapless::consts::*;
///
/// let mut s: String<U8> = String::from("foo");
/// let mut s: String<8> = String::from("foo");
///
/// s.clear();
///
@ -374,23 +279,17 @@ where
/// ```
#[inline]
pub fn clear(&mut self) {
self.0.vec.clear()
self.vec.clear()
}
}
impl<N> Default for String<N>
where
N: ArrayLength<u8>,
{
impl<const N: usize> Default for String<N> {
fn default() -> Self {
Self::new()
}
}
impl<'a, N> From<&'a str> for String<N>
where
N: ArrayLength<u8>,
{
impl<'a, const N: usize> From<&'a str> for String<N> {
fn from(s: &'a str) -> Self {
let mut new = String::new();
new.push_str(s).unwrap();
@ -398,10 +297,7 @@ where
}
}
impl<N> str::FromStr for String<N>
where
N: ArrayLength<u8>,
{
impl<const N: usize> str::FromStr for String<N> {
type Err = ();
fn from_str(s: &str) -> Result<Self, Self::Err> {
@ -411,59 +307,41 @@ where
}
}
impl<N> Clone for String<N>
where
N: ArrayLength<u8>,
{
impl<const N: usize> Clone for String<N> {
fn clone(&self) -> Self {
Self(crate::i::String {
vec: self.0.vec.clone(),
})
Self {
vec: self.vec.clone(),
}
}
}
impl<N> fmt::Debug for String<N>
where
N: ArrayLength<u8>,
{
impl<const N: usize> fmt::Debug for String<N> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
<str as fmt::Debug>::fmt(self, f)
}
}
impl<N> fmt::Display for String<N>
where
N: ArrayLength<u8>,
{
impl<const N: usize> fmt::Display for String<N> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
<str as fmt::Display>::fmt(self, f)
}
}
impl<N> hash::Hash for String<N>
where
N: ArrayLength<u8>,
{
impl<const N: usize> hash::Hash for String<N> {
#[inline]
fn hash<H: hash::Hasher>(&self, hasher: &mut H) {
<str as hash::Hash>::hash(self, hasher)
}
}
impl<N> hash32::Hash for String<N>
where
N: ArrayLength<u8>,
{
impl<const N: usize> hash32::Hash for String<N> {
#[inline]
fn hash<H: hash32::Hasher>(&self, hasher: &mut H) {
<str as hash32::Hash>::hash(self, hasher)
}
}
impl<N> fmt::Write for String<N>
where
N: ArrayLength<u8>,
{
impl<const N: usize> fmt::Write for String<N> {
fn write_str(&mut self, s: &str) -> Result<(), fmt::Error> {
self.push_str(s).map_err(|_| fmt::Error)
}
@ -473,10 +351,7 @@ where
}
}
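
This impl is what lets `write!` target a fixed-capacity buffer, reporting `fmt::Error` instead of allocating; a sketch:

```rust
use core::fmt::Write;
use heapless::String;

fn main() {
    let mut s: String<16> = String::new();
    write!(s, "x = {}", 42).unwrap();
    assert_eq!(s, "x = 42");

    // a buffer that is too small errors instead of growing
    let mut tiny: String<2> = String::new();
    assert!(write!(tiny, "{}", 12345).is_err());
}
```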
impl<N> ops::Deref for String<N>
where
N: ArrayLength<u8>,
{
impl<const N: usize> ops::Deref for String<N> {
type Target = str;
fn deref(&self) -> &str {
@ -484,40 +359,27 @@ where
}
}
impl<N> ops::DerefMut for String<N>
where
N: ArrayLength<u8>,
{
impl<const N: usize> ops::DerefMut for String<N> {
fn deref_mut(&mut self) -> &mut str {
self.as_mut_str()
}
}
impl<N> AsRef<str> for String<N>
where
N: ArrayLength<u8>,
{
impl<const N: usize> AsRef<str> for String<N> {
#[inline]
fn as_ref(&self) -> &str {
self
}
}
impl<N> AsRef<[u8]> for String<N>
where
N: ArrayLength<u8>,
{
impl<const N: usize> AsRef<[u8]> for String<N> {
#[inline]
fn as_ref(&self) -> &[u8] {
self.as_bytes()
}
}
impl<N1, N2> PartialEq<String<N2>> for String<N1>
where
N1: ArrayLength<u8>,
N2: ArrayLength<u8>,
{
impl<const N1: usize, const N2: usize> PartialEq<String<N2>> for String<N1> {
fn eq(&self, rhs: &String<N2>) -> bool {
str::eq(&**self, &**rhs)
}
@ -527,49 +389,59 @@ where
}
}
macro_rules! impl_eq {
($lhs:ty, $rhs:ty) => {
impl<'a, 'b, N> PartialEq<$rhs> for $lhs
where
N: ArrayLength<u8>,
{
#[inline]
fn eq(&self, other: &$rhs) -> bool {
str::eq(&self[..], &other[..])
}
#[inline]
fn ne(&self, other: &$rhs) -> bool {
str::ne(&self[..], &other[..])
}
}
impl<'a, 'b, N> PartialEq<$lhs> for $rhs
where
N: ArrayLength<u8>,
{
#[inline]
fn eq(&self, other: &$lhs) -> bool {
str::eq(&self[..], &other[..])
}
#[inline]
fn ne(&self, other: &$lhs) -> bool {
str::ne(&self[..], &other[..])
}
}
};
// String<N> == str
impl<const N: usize> PartialEq<str> for String<N> {
#[inline]
fn eq(&self, other: &str) -> bool {
str::eq(&self[..], &other[..])
}
#[inline]
fn ne(&self, other: &str) -> bool {
str::ne(&self[..], &other[..])
}
}
impl_eq! { String<N>, str }
impl_eq! { String<N>, &'a str }
// String<N> == &str
impl<const N: usize> PartialEq<&str> for String<N> {
#[inline]
fn eq(&self, other: &&str) -> bool {
str::eq(&self[..], &other[..])
}
#[inline]
fn ne(&self, other: &&str) -> bool {
str::ne(&self[..], &other[..])
}
}
impl<N> Eq for String<N> where N: ArrayLength<u8> {}
// str == String<N>
impl<const N: usize> PartialEq<String<N>> for str {
#[inline]
fn eq(&self, other: &String<N>) -> bool {
str::eq(&self[..], &other[..])
}
#[inline]
fn ne(&self, other: &String<N>) -> bool {
str::ne(&self[..], &other[..])
}
}
// &str == String<N>
impl<const N: usize> PartialEq<String<N>> for &str {
#[inline]
fn eq(&self, other: &String<N>) -> bool {
str::eq(&self[..], &other[..])
}
#[inline]
fn ne(&self, other: &String<N>) -> bool {
str::ne(&self[..], &other[..])
}
}
impl<const N: usize> Eq for String<N> {}
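
Together with the `String<N1> == String<N2>` impl above, these give comparisons in both directions and across capacities; a sketch:

```rust
use heapless::String;

fn main() {
    let a: String<8> = String::from("ab");
    let b: String<4> = String::from("ab");

    assert!(a == "ab"); // String<N> == &str
    assert!("ab" == a); // &str == String<N>
    assert!(a == b);    // String<N1> == String<N2>
}
```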
macro_rules! impl_from_num {
($num:ty, $size:ty) => {
impl<N> From<$num> for String<N>
where
N: ArrayLength<u8> + IsGreaterOrEqual<$size, Output = True>,
{
($num:ty, $size:expr) => {
impl<const N: usize> From<$num> for String<N> {
fn from(s: $num) -> Self {
let mut new = String::new();
write!(&mut new, "{}", s).unwrap();
@ -579,28 +451,28 @@ macro_rules! impl_from_num {
};
}
impl_from_num!(i8, U4);
impl_from_num!(i16, U6);
impl_from_num!(i32, U11);
impl_from_num!(i64, U20);
impl_from_num!(i8, 4);
impl_from_num!(i16, 6);
impl_from_num!(i32, 11);
impl_from_num!(i64, 20);
impl_from_num!(u8, U3);
impl_from_num!(u16, U5);
impl_from_num!(u32, U10);
impl_from_num!(u64, U20);
impl_from_num!(u8, 3);
impl_from_num!(u16, 5);
impl_from_num!(u32, 10);
impl_from_num!(u64, 20);
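
The numbers encode the worst-case decimal width of each integer type, sign included; for the 64-bit case, a sketch:

```rust
use heapless::String;

fn main() {
    // 20 bytes fit both extremes: u64::MAX is 20 digits,
    // i64::MIN is 19 digits plus the sign
    let v: String<20> = String::from(u64::MAX);
    assert_eq!(v, "18446744073709551615");
    let w: String<20> = String::from(i64::MIN);
    assert_eq!(w, "-9223372036854775808");
}
```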
#[cfg(test)]
mod tests {
use crate::{consts::*, String, Vec};
use crate::{String, Vec};
#[test]
fn static_new() {
static mut _S: String<U8> = String(crate::i::String::new());
static mut _S: String<8> = String::new();
}
#[test]
fn clone() {
let s1: String<U20> = String::from("abcd");
let s1: String<20> = String::from("abcd");
let mut s2 = s1.clone();
s2.push_str(" efgh").unwrap();
@ -612,7 +484,7 @@ mod tests {
fn debug() {
use core::fmt::Write;
let s: String<U8> = String::from("abcd");
let s: String<8> = String::from("abcd");
let mut std_s = std::string::String::new();
write!(std_s, "{:?}", s).unwrap();
assert_eq!("\"abcd\"", std_s);
@ -622,7 +494,7 @@ mod tests {
fn display() {
use core::fmt::Write;
let s: String<U8> = String::from("abcd");
let s: String<8> = String::from("abcd");
let mut std_s = std::string::String::new();
write!(std_s, "{}", s).unwrap();
assert_eq!("abcd", std_s);
@ -630,7 +502,7 @@ mod tests {
#[test]
fn empty() {
let s: String<U4> = String::new();
let s: String<4> = String::new();
assert!(s.capacity() == 4);
assert_eq!(s, "");
assert_eq!(s.len(), 0);
@ -639,7 +511,7 @@ mod tests {
#[test]
fn from() {
let s: String<U4> = String::from("123");
let s: String<4> = String::from("123");
assert!(s.len() == 3);
assert_eq!(s, "123");
}
@ -648,84 +520,37 @@ mod tests {
fn from_str() {
use core::str::FromStr;
let s: String<U4> = String::<U4>::from_str("123").unwrap();
let s: String<4> = String::<4>::from_str("123").unwrap();
assert!(s.len() == 3);
assert_eq!(s, "123");
let e: () = String::<U2>::from_str("123").unwrap_err();
let e: () = String::<2>::from_str("123").unwrap_err();
assert_eq!(e, ());
}
#[test]
#[should_panic]
fn from_panic() {
let _: String<U4> = String::from("12345");
}
#[test]
fn from_utf8() {
let mut v: Vec<u8, U8> = Vec::new();
v.push('a' as u8).unwrap();
v.push('b' as u8).unwrap();
let s = String::from_utf8(v).unwrap();
assert_eq!(s, "ab");
}
#[test]
fn from_utf8_uenc() {
let mut v: Vec<u8, U8> = Vec::new();
v.push(240).unwrap();
v.push(159).unwrap();
v.push(146).unwrap();
v.push(150).unwrap();
assert!(String::from_utf8(v).is_ok());
}
#[test]
fn from_utf8_uenc_err() {
let mut v: Vec<u8, U8> = Vec::new();
v.push(0).unwrap();
v.push(159).unwrap();
v.push(146).unwrap();
v.push(150).unwrap();
assert!(String::from_utf8(v).is_err());
}
#[test]
fn from_utf8_unchecked() {
let mut v: Vec<u8, U8> = Vec::new();
v.push(104).unwrap();
v.push(101).unwrap();
v.push(108).unwrap();
v.push(108).unwrap();
v.push(111).unwrap();
let s = unsafe { String::from_utf8_unchecked(v) };
assert_eq!(s, "hello");
let _: String<4> = String::from("12345");
}
#[test]
fn from_num() {
let v = String::<U20>::from(18446744073709551615 as u64);
let v: String<20> = String::from(18446744073709551615 as u64);
assert_eq!(v, "18446744073709551615");
}
#[test]
fn into_bytes() {
let s: String<U4> = String::from("ab");
let b: Vec<u8, U4> = s.into_bytes();
let s: String<4> = String::from("ab");
let b: Vec<u8, 4> = s.into_bytes();
assert_eq!(b.len(), 2);
assert_eq!(&['a' as u8, 'b' as u8], &b[..]);
}
#[test]
fn as_str() {
let s: String<U4> = String::from("ab");
let s: String<4> = String::from("ab");
assert_eq!(s.as_str(), "ab");
// should be moved to fail test
@ -735,7 +560,7 @@ mod tests {
#[test]
fn as_mut_str() {
let mut s: String<U4> = String::from("ab");
let mut s: String<4> = String::from("ab");
let s = s.as_mut_str();
s.make_ascii_uppercase();
assert_eq!(s, "AB");
@ -743,16 +568,18 @@ mod tests {
#[test]
fn push_str() {
let mut s: String<U8> = String::from("foo");
let mut s: String<8> = String::from("foo");
assert!(s.push_str("bar").is_ok());
assert_eq!("foobar", s);
assert_eq!(s, "foobar");
assert!(s.push_str("tender").is_err());
assert_eq!("foobar", s);
assert_eq!(s, "foobar");
}
#[test]
fn push() {
let mut s: String<U6> = String::from("abc");
let mut s: String<6> = String::from("abc");
assert!(s.push('1').is_ok());
assert!(s.push('2').is_ok());
assert!(s.push('3').is_ok());
@ -762,13 +589,13 @@ mod tests {
#[test]
fn as_bytes() {
let s: String<U8> = String::from("hello");
let s: String<8> = String::from("hello");
assert_eq!(&[104, 101, 108, 108, 111], s.as_bytes());
}
#[test]
fn truncate() {
let mut s: String<U8> = String::from("hello");
let mut s: String<8> = String::from("hello");
s.truncate(6);
assert_eq!(s.len(), 5);
s.truncate(2);
@ -779,7 +606,7 @@ mod tests {
#[test]
fn pop() {
let mut s: String<U8> = String::from("foo");
let mut s: String<8> = String::from("foo");
assert_eq!(s.pop(), Some('o'));
assert_eq!(s.pop(), Some('o'));
assert_eq!(s.pop(), Some('f'));
@ -788,7 +615,7 @@ mod tests {
#[test]
fn pop_uenc() {
let mut s: String<U8> = String::from("e\u{301}");
let mut s: String<8> = String::from("e\u{301}");
assert_eq!(s.len(), 3);
match s.pop() {
Some(c) => {
@ -802,7 +629,7 @@ mod tests {
#[test]
fn is_empty() {
let mut v: String<U8> = String::new();
let mut v: String<8> = String::new();
assert!(v.is_empty());
let _ = v.push('a');
assert!(!v.is_empty());
@ -810,7 +637,7 @@ mod tests {
#[test]
fn clear() {
let mut s: String<U8> = String::from("foo");
let mut s: String<8> = String::from("foo");
s.clear();
assert!(s.is_empty());
assert_eq!(0, s.len());

View File

@ -1,21 +1,14 @@
use crate::{string::String, vec::Vec};
use ufmt_write::uWrite;
use crate::{string::String, vec::Vec, ArrayLength};
impl<N> uWrite for String<N>
where
N: ArrayLength<u8>,
{
impl<const N: usize> uWrite for String<N> {
type Error = ();
fn write_str(&mut self, s: &str) -> Result<(), Self::Error> {
self.push_str(s)
}
}
impl<N> uWrite for Vec<u8, N>
where
N: ArrayLength<u8>,
{
impl<const N: usize> uWrite for Vec<u8, N> {
type Error = ();
fn write_str(&mut self, s: &str) -> Result<(), Self::Error> {
self.extend_from_slice(s.as_bytes())
@ -28,8 +21,6 @@ mod tests {
use ufmt::{derive::uDebug, uwrite};
use crate::consts::*;
#[derive(uDebug)]
struct Pair {
x: u32,
@ -41,7 +32,7 @@ mod tests {
let a = 123;
let b = Pair { x: 0, y: 1234 };
let mut s = String::<U32>::new();
let mut s = String::<32>::new();
uwrite!(s, "{} -> {:?}", a, b).unwrap();
assert_eq!(s, "123 -> Pair { x: 0, y: 1234 }");
@ -50,7 +41,7 @@ mod tests {
#[test]
fn test_string_err() {
let p = Pair { x: 0, y: 1234 };
let mut s = String::<U4>::new();
let mut s = String::<4>::new();
assert!(uwrite!(s, "{:?}", p).is_err());
}
@ -59,7 +50,7 @@ mod tests {
let a = 123;
let b = Pair { x: 0, y: 1234 };
let mut v = Vec::<u8, U32>::new();
let mut v = Vec::<u8, 32>::new();
uwrite!(v, "{} -> {:?}", a, b).unwrap();
assert_eq!(v, b"123 -> Pair { x: 0, y: 1234 }");

File diff suppressed because it is too large

View File

@ -1,7 +1,6 @@
//! Collections of `Send`-able things are `Send`
use heapless::{
consts,
spsc::{Consumer, Producer, Queue},
HistoryBuffer, Vec,
};
@ -18,9 +17,9 @@ fn send() {
{
}
is_send::<Consumer<IsSend, consts::U4>>();
is_send::<Producer<IsSend, consts::U4>>();
is_send::<Queue<IsSend, consts::U4>>();
is_send::<Vec<IsSend, consts::U4>>();
is_send::<HistoryBuffer<IsSend, consts::U4>>();
is_send::<Consumer<IsSend, 4>>();
is_send::<Producer<IsSend, 4>>();
is_send::<Queue<IsSend, 4>>();
is_send::<Vec<IsSend, 4>>();
is_send::<HistoryBuffer<IsSend, 4>>();
}

View File

@ -4,13 +4,12 @@
use std::{sync::mpsc, thread};
use generic_array::typenum::Unsigned;
use heapless::{consts::*, mpmc::Q64, spsc};
use heapless::{mpmc::Q64, spsc};
use scoped_threadpool::Pool;
#[test]
fn once() {
static mut RB: spsc::Queue<i32, U4> = spsc::Queue(heapless::i::Queue::new());
static mut RB: spsc::Queue<i32, 4> = spsc::Queue::new();
let rb = unsafe { &mut RB };
@ -31,7 +30,7 @@ fn once() {
#[test]
fn twice() {
static mut RB: spsc::Queue<i32, U4> = spsc::Queue(heapless::i::Queue::new());
static mut RB: spsc::Queue<i32, 5> = spsc::Queue::new();
let rb = unsafe { &mut RB };
@ -53,7 +52,7 @@ fn twice() {
#[test]
fn scoped() {
let mut rb: spsc::Queue<i32, U4> = spsc::Queue::new();
let mut rb: spsc::Queue<i32, 5> = spsc::Queue::new();
rb.enqueue(0).unwrap();
@ -76,9 +75,9 @@ fn scoped() {
#[test]
fn contention() {
type N = U1024;
const N: usize = 1024;
let mut rb: spsc::Queue<u8, N> = spsc::Queue::new();
let mut rb: spsc::Queue<u8, 4> = spsc::Queue::new();
{
let (mut p, mut c) = rb.split();
@ -87,8 +86,8 @@ fn contention() {
scope.execute(move || {
let mut sum: u32 = 0;
for i in 0..(2 * N::to_u32()) {
sum = sum.wrapping_add(i);
for i in 0..(2 * N) {
sum = sum.wrapping_add(i as u32);
while let Err(_) = p.enqueue(i as u8) {}
}
@ -98,7 +97,7 @@ fn contention() {
scope.execute(move || {
let mut sum: u32 = 0;
for _ in 0..(2 * N::to_u32()) {
for _ in 0..(2 * N) {
loop {
match c.dequeue() {
Some(v) => {
@ -163,11 +162,11 @@ fn mpmc_contention() {
#[test]
fn unchecked() {
type N = U1024;
const N: usize = 1024;
let mut rb: spsc::Queue<u8, N> = spsc::Queue::new();
for _ in 0..N::to_usize() / 2 {
for _ in 0..N / 2 - 1 {
rb.enqueue(1).unwrap();
}
@ -176,31 +175,29 @@ fn unchecked() {
Pool::new(2).scoped(move |scope| {
scope.execute(move || {
for _ in 0..N::to_usize() / 2 {
unsafe {
p.enqueue_unchecked(2);
}
for _ in 0..N / 2 - 1 {
p.enqueue(2).unwrap();
}
});
scope.execute(move || {
let mut sum: usize = 0;
for _ in 0..N::to_usize() / 2 {
sum = sum.wrapping_add(usize::from(unsafe { c.dequeue_unchecked() }));
for _ in 0..N / 2 - 1 {
sum = sum.wrapping_add(usize::from(c.dequeue().unwrap()));
}
assert_eq!(sum, N::to_usize() / 2);
assert_eq!(sum, N / 2 - 1);
});
});
}
assert_eq!(rb.len(), N::to_usize() / 2);
assert_eq!(rb.len(), N / 2 - 1);
}
#[test]
fn len_properly_wraps() {
type N = U3;
const N: usize = 4;
let mut rb: spsc::Queue<u8, N> = spsc::Queue::new();
rb.enqueue(1).unwrap();
@ -217,7 +214,7 @@ fn len_properly_wraps() {
#[test]
fn iterator_properly_wraps() {
type N = U3;
const N: usize = 4;
let mut rb: spsc::Queue<u8, N> = spsc::Queue::new();
rb.enqueue(1).unwrap();
@ -233,6 +230,7 @@ fn iterator_properly_wraps() {
assert_eq!(expected, actual)
}
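
The bumped capacities in these tests (4 → 5, `U3` → 4) reflect that the ported queue keeps one slot in reserve, so a test that must hold `k` items now declares capacity `k + 1`; a sketch:

```rust
use heapless::spsc::Queue;

fn main() {
    // to hold 4 items, declare a capacity-5 queue, as the updated tests do
    let mut q: Queue<i32, usize, 5> = Queue::new();
    for i in 0..4 {
        q.enqueue(i).unwrap();
    }
    assert_eq!(q.len(), 4);
}
```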
#[cfg(all(target_arch = "x86_64", feature = "x86-sync-pool"))]
#[test]
fn pool() {
use heapless::pool::singleton::Pool as _;