Merge pull request #456 from reitermarkus/fix-pool-macro-warnings

Fix pool macro warnings.
This commit is contained in:
Markus Reiter 2024-02-27 17:19:02 +00:00 committed by GitHub
commit 483862b3e8
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
5 changed files with 164 additions and 182 deletions

View File

@@ -25,7 +25,7 @@ and this project adheres to [Semantic Versioning](http://semver.org/).
### Fixed ### Fixed
- Fixed clippy lints. - Fixed clippy lints.
- Fixed `{arc,box,object}_pool!` emitting clippy lints for `CamelCase` and `SNAKE_CASE`. - Fixed `{arc,box,object}_pool!` emitting clippy lints.
- Fixed the list of implemented data structures in the crate docs, by adding `Deque`, - Fixed the list of implemented data structures in the crate docs, by adding `Deque`,
`HistoryBuffer` and `SortedLinkedList` to the list. `HistoryBuffer` and `SortedLinkedList` to the list.

View File

@@ -19,13 +19,13 @@
//! //!
//! - test program: //! - test program:
//! //!
//! ``` no_run //! ```no_run
//! use heapless::box_pool; //! use heapless::box_pool;
//! //!
//! box_pool!(P: ()); // or `arc_pool!` or `object_pool!` //! box_pool!(MyBoxPool: ()); // or `arc_pool!` or `object_pool!`
//! //!
//! bkpt(); //! bkpt();
//! let res = P.alloc(()); //! let res = MyBoxPool.alloc(());
//! bkpt(); //! bkpt();
//! //!
//! if let Ok(boxed) = res { //! if let Ok(boxed) = res {

View File

@@ -5,23 +5,23 @@
//! ``` //! ```
//! use heapless::{arc_pool, pool::arc::{Arc, ArcBlock}}; //! use heapless::{arc_pool, pool::arc::{Arc, ArcBlock}};
//! //!
//! arc_pool!(P: u128); //! arc_pool!(MyArcPool: u128);
//! //!
//! // cannot allocate without first giving memory blocks to the pool //! // cannot allocate without first giving memory blocks to the pool
//! assert!(P.alloc(42).is_err()); //! assert!(MyArcPool.alloc(42).is_err());
//! //!
//! // (some `no_std` runtimes have safe APIs to create `&'static mut` references) //! // (some `no_std` runtimes have safe APIs to create `&'static mut` references)
//! let block: &'static mut ArcBlock<u128> = unsafe { //! let block: &'static mut ArcBlock<u128> = unsafe {
//! static mut B: ArcBlock<u128> = ArcBlock::new(); //! static mut BLOCK: ArcBlock<u128> = ArcBlock::new();
//! &mut B //! &mut BLOCK
//! }; //! };
//! //!
//! P.manage(block); //! MyArcPool.manage(block);
//! //!
//! let arc = P.alloc(1).unwrap(); //! let arc = MyArcPool.alloc(1).unwrap();
//! //!
//! // number of smart pointers is limited to the number of blocks managed by the pool //! // number of smart pointers is limited to the number of blocks managed by the pool
//! let res = P.alloc(2); //! let res = MyArcPool.alloc(2);
//! assert!(res.is_err()); //! assert!(res.is_err());
//! //!
//! // but cloning does not consume an `ArcBlock` //! // but cloning does not consume an `ArcBlock`
@@ -34,7 +34,7 @@
//! drop(arc); // release memory //! drop(arc); // release memory
//! //!
//! // it's now possible to allocate a new `Arc` smart pointer //! // it's now possible to allocate a new `Arc` smart pointer
//! let res = P.alloc(3); //! let res = MyArcPool.alloc(3);
//! //!
//! assert!(res.is_ok()); //! assert!(res.is_ok());
//! ``` //! ```
@@ -47,7 +47,7 @@
//! ``` //! ```
//! use heapless::{arc_pool, pool::arc::ArcBlock}; //! use heapless::{arc_pool, pool::arc::ArcBlock};
//! //!
//! arc_pool!(P: u128); //! arc_pool!(MyArcPool: u128);
//! //!
//! const POOL_CAPACITY: usize = 8; //! const POOL_CAPACITY: usize = 8;
//! //!
@@ -58,7 +58,7 @@
//! }; //! };
//! //!
//! for block in blocks { //! for block in blocks {
//! P.manage(block); //! MyArcPool.manage(block);
//! } //! }
//! ``` //! ```
@@ -83,13 +83,14 @@ use super::treiber::{NonNullPtr, Stack, UnionNode};
#[macro_export] #[macro_export]
macro_rules! arc_pool { macro_rules! arc_pool {
($name:ident: $data_type:ty) => { ($name:ident: $data_type:ty) => {
#[allow(non_camel_case_types)]
pub struct $name; pub struct $name;
impl $crate::pool::arc::ArcPool for $name { impl $crate::pool::arc::ArcPool for $name {
type Data = $data_type; type Data = $data_type;
fn singleton() -> &'static $crate::pool::arc::ArcPoolImpl<$data_type> { fn singleton() -> &'static $crate::pool::arc::ArcPoolImpl<$data_type> {
// Even though the static variable is not exposed to user code, it is
// still useful to have a descriptive symbol name for debugging.
#[allow(non_upper_case_globals)] #[allow(non_upper_case_globals)]
static $name: $crate::pool::arc::ArcPoolImpl<$data_type> = static $name: $crate::pool::arc::ArcPoolImpl<$data_type> =
$crate::pool::arc::ArcPoolImpl::new(); $crate::pool::arc::ArcPoolImpl::new();
@@ -386,67 +387,67 @@ mod tests {
#[test] #[test]
fn cannot_alloc_if_empty() { fn cannot_alloc_if_empty() {
arc_pool!(P: i32); arc_pool!(MyArcPool: i32);
assert_eq!(Err(42), P.alloc(42),); assert_eq!(Err(42), MyArcPool.alloc(42),);
} }
#[test] #[test]
fn can_alloc_if_manages_one_block() { fn can_alloc_if_manages_one_block() {
arc_pool!(P: i32); arc_pool!(MyArcPool: i32);
let block = unsafe { let block = unsafe {
static mut B: ArcBlock<i32> = ArcBlock::new(); static mut BLOCK: ArcBlock<i32> = ArcBlock::new();
&mut B &mut BLOCK
}; };
P.manage(block); MyArcPool.manage(block);
assert_eq!(42, *P.alloc(42).unwrap()); assert_eq!(42, *MyArcPool.alloc(42).unwrap());
} }
#[test] #[test]
fn alloc_drop_alloc() { fn alloc_drop_alloc() {
arc_pool!(P: i32); arc_pool!(MyArcPool: i32);
let block = unsafe { let block = unsafe {
static mut B: ArcBlock<i32> = ArcBlock::new(); static mut BLOCK: ArcBlock<i32> = ArcBlock::new();
&mut B &mut BLOCK
}; };
P.manage(block); MyArcPool.manage(block);
let arc = P.alloc(1).unwrap(); let arc = MyArcPool.alloc(1).unwrap();
drop(arc); drop(arc);
assert_eq!(2, *P.alloc(2).unwrap()); assert_eq!(2, *MyArcPool.alloc(2).unwrap());
} }
#[test] #[test]
fn strong_count_starts_at_one() { fn strong_count_starts_at_one() {
arc_pool!(P: i32); arc_pool!(MyArcPool: i32);
let block = unsafe { let block = unsafe {
static mut B: ArcBlock<i32> = ArcBlock::new(); static mut BLOCK: ArcBlock<i32> = ArcBlock::new();
&mut B &mut BLOCK
}; };
P.manage(block); MyArcPool.manage(block);
let arc = P.alloc(1).ok().unwrap(); let arc = MyArcPool.alloc(1).ok().unwrap();
assert_eq!(1, arc.inner().strong.load(Ordering::Relaxed)); assert_eq!(1, arc.inner().strong.load(Ordering::Relaxed));
} }
#[test] #[test]
fn clone_increases_strong_count() { fn clone_increases_strong_count() {
arc_pool!(P: i32); arc_pool!(MyArcPool: i32);
let block = unsafe { let block = unsafe {
static mut B: ArcBlock<i32> = ArcBlock::new(); static mut BLOCK: ArcBlock<i32> = ArcBlock::new();
&mut B &mut BLOCK
}; };
P.manage(block); MyArcPool.manage(block);
let arc = P.alloc(1).ok().unwrap(); let arc = MyArcPool.alloc(1).ok().unwrap();
let before = arc.inner().strong.load(Ordering::Relaxed); let before = arc.inner().strong.load(Ordering::Relaxed);
@@ -459,15 +460,15 @@ mod tests {
#[test] #[test]
fn drop_decreases_strong_count() { fn drop_decreases_strong_count() {
arc_pool!(P: i32); arc_pool!(MyArcPool: i32);
let block = unsafe { let block = unsafe {
static mut B: ArcBlock<i32> = ArcBlock::new(); static mut BLOCK: ArcBlock<i32> = ArcBlock::new();
&mut B &mut BLOCK
}; };
P.manage(block); MyArcPool.manage(block);
let arc = P.alloc(1).ok().unwrap(); let arc = MyArcPool.alloc(1).ok().unwrap();
let arc2 = arc.clone(); let arc2 = arc.clone();
let before = arc.inner().strong.load(Ordering::Relaxed); let before = arc.inner().strong.load(Ordering::Relaxed);
@@ -482,23 +483,23 @@ mod tests {
fn runs_destructor_exactly_once_when_strong_count_reaches_zero() { fn runs_destructor_exactly_once_when_strong_count_reaches_zero() {
static COUNT: AtomicUsize = AtomicUsize::new(0); static COUNT: AtomicUsize = AtomicUsize::new(0);
pub struct S; pub struct MyStruct;
impl Drop for S { impl Drop for MyStruct {
fn drop(&mut self) { fn drop(&mut self) {
COUNT.fetch_add(1, Ordering::Relaxed); COUNT.fetch_add(1, Ordering::Relaxed);
} }
} }
arc_pool!(P: S); arc_pool!(MyArcPool: MyStruct);
let block = unsafe { let block = unsafe {
static mut B: ArcBlock<S> = ArcBlock::new(); static mut BLOCK: ArcBlock<MyStruct> = ArcBlock::new();
&mut B &mut BLOCK
}; };
P.manage(block); MyArcPool.manage(block);
let arc = P.alloc(S).ok().unwrap(); let arc = MyArcPool.alloc(MyStruct).ok().unwrap();
assert_eq!(0, COUNT.load(Ordering::Relaxed)); assert_eq!(0, COUNT.load(Ordering::Relaxed));
@@ -512,24 +513,17 @@ mod tests {
#[repr(align(4096))] #[repr(align(4096))]
pub struct Zst4096; pub struct Zst4096;
arc_pool!(P: Zst4096); arc_pool!(MyArcPool: Zst4096);
let block = unsafe { let block = unsafe {
static mut B: ArcBlock<Zst4096> = ArcBlock::new(); static mut BLOCK: ArcBlock<Zst4096> = ArcBlock::new();
&mut B &mut BLOCK
}; };
P.manage(block); MyArcPool.manage(block);
let arc = P.alloc(Zst4096).ok().unwrap(); let arc = MyArcPool.alloc(Zst4096).ok().unwrap();
let raw = &*arc as *const Zst4096; let raw = &*arc as *const Zst4096;
assert_eq!(0, raw as usize % 4096); assert_eq!(0, raw as usize % 4096);
} }
#[test]
fn arc_pool_case() {
// https://github.com/rust-embedded/heapless/issues/411
arc_pool!(CamelCaseType: u128);
arc_pool!(SCREAMING_SNAKE_CASE_TYPE: u128);
}
} }

View File

@@ -5,35 +5,35 @@
//! ``` //! ```
//! use heapless::{box_pool, pool::boxed::{Box, BoxBlock}}; //! use heapless::{box_pool, pool::boxed::{Box, BoxBlock}};
//! //!
//! box_pool!(P: u128); //! box_pool!(MyBoxPool: u128);
//! //!
//! // cannot allocate without first giving memory blocks to the pool //! // cannot allocate without first giving memory blocks to the pool
//! assert!(P.alloc(42).is_err()); //! assert!(MyBoxPool.alloc(42).is_err());
//! //!
//! // (some `no_std` runtimes have safe APIs to create `&'static mut` references) //! // (some `no_std` runtimes have safe APIs to create `&'static mut` references)
//! let block: &'static mut BoxBlock<u128> = unsafe { //! let block: &'static mut BoxBlock<u128> = unsafe {
//! static mut B: BoxBlock <u128>= BoxBlock::new(); //! static mut BLOCK: BoxBlock <u128>= BoxBlock::new();
//! &mut B //! &mut BLOCK
//! }; //! };
//! //!
//! // give block of memory to the pool //! // give block of memory to the pool
//! P.manage(block); //! MyBoxPool.manage(block);
//! //!
//! // it's now possible to allocate //! // it's now possible to allocate
//! let mut boxed = P.alloc(1).unwrap(); //! let mut boxed = MyBoxPool.alloc(1).unwrap();
//! //!
//! // mutation is possible //! // mutation is possible
//! *boxed += 1; //! *boxed += 1;
//! assert_eq!(2, *boxed); //! assert_eq!(2, *boxed);
//! //!
//! // number of boxes is limited to the number of blocks managed by the pool //! // number of boxes is limited to the number of blocks managed by the pool
//! let res = P.alloc(3); //! let res = MyBoxPool.alloc(3);
//! assert!(res.is_err()); //! assert!(res.is_err());
//! //!
//! // give another memory block to the pool //! // give another memory block to the pool
//! P.manage(unsafe { //! MyBoxPool.manage(unsafe {
//! static mut B: BoxBlock<u128> = BoxBlock::new(); //! static mut BLOCK: BoxBlock<u128> = BoxBlock::new();
//! &mut B //! &mut BLOCK
//! }); //! });
//! //!
//! // cloning also consumes a memory block from the pool //! // cloning also consumes a memory block from the pool
@@ -42,14 +42,14 @@
//! assert_eq!(3, *separate_box); //! assert_eq!(3, *separate_box);
//! //!
//! // after the clone it's not possible to allocate again //! // after the clone it's not possible to allocate again
//! let res = P.alloc(4); //! let res = MyBoxPool.alloc(4);
//! assert!(res.is_err()); //! assert!(res.is_err());
//! //!
//! // `boxed`'s destructor returns the memory block to the pool //! // `boxed`'s destructor returns the memory block to the pool
//! drop(boxed); //! drop(boxed);
//! //!
//! // it's possible to allocate again //! // it's possible to allocate again
//! let res = P.alloc(5); //! let res = MyBoxPool.alloc(5);
//! //!
//! assert!(res.is_ok()); //! assert!(res.is_ok());
//! ``` //! ```
@@ -62,7 +62,7 @@
//! ``` //! ```
//! use heapless::{box_pool, pool::boxed::BoxBlock}; //! use heapless::{box_pool, pool::boxed::BoxBlock};
//! //!
//! box_pool!(P: u128); //! box_pool!(MyBoxPool: u128);
//! //!
//! const POOL_CAPACITY: usize = 8; //! const POOL_CAPACITY: usize = 8;
//! //!
@@ -74,7 +74,7 @@
//! }; //! };
//! //!
//! for block in blocks { //! for block in blocks {
//! P.manage(block); //! MyBoxPool.manage(block);
//! } //! }
//! ``` //! ```
@@ -95,13 +95,14 @@ use super::treiber::{NonNullPtr, Stack, UnionNode};
#[macro_export] #[macro_export]
macro_rules! box_pool { macro_rules! box_pool {
($name:ident: $data_type:ty) => { ($name:ident: $data_type:ty) => {
#[allow(non_camel_case_types)]
pub struct $name; pub struct $name;
impl $crate::pool::boxed::BoxPool for $name { impl $crate::pool::boxed::BoxPool for $name {
type Data = $data_type; type Data = $data_type;
fn singleton() -> &'static $crate::pool::boxed::BoxPoolImpl<$data_type> { fn singleton() -> &'static $crate::pool::boxed::BoxPoolImpl<$data_type> {
// Even though the static variable is not exposed to user code, it is
// still useful to have a descriptive symbol name for debugging.
#[allow(non_upper_case_globals)] #[allow(non_upper_case_globals)]
static $name: $crate::pool::boxed::BoxPoolImpl<$data_type> = static $name: $crate::pool::boxed::BoxPoolImpl<$data_type> =
$crate::pool::boxed::BoxPoolImpl::new(); $crate::pool::boxed::BoxPoolImpl::new();
@@ -366,62 +367,62 @@ mod tests {
#[test] #[test]
fn cannot_alloc_if_empty() { fn cannot_alloc_if_empty() {
box_pool!(P: i32); box_pool!(MyBoxPool: i32);
assert_eq!(Err(42), P.alloc(42)); assert_eq!(Err(42), MyBoxPool.alloc(42));
} }
#[test] #[test]
fn can_alloc_if_pool_manages_one_block() { fn can_alloc_if_pool_manages_one_block() {
box_pool!(P: i32); box_pool!(MyBoxPool: i32);
let block = unsafe { let block = unsafe {
static mut B: BoxBlock<i32> = BoxBlock::new(); static mut BLOCK: BoxBlock<i32> = BoxBlock::new();
&mut B &mut BLOCK
}; };
P.manage(block); MyBoxPool.manage(block);
assert_eq!(42, *P.alloc(42).unwrap()); assert_eq!(42, *MyBoxPool.alloc(42).unwrap());
} }
#[test] #[test]
fn alloc_drop_alloc() { fn alloc_drop_alloc() {
box_pool!(P: i32); box_pool!(MyBoxPool: i32);
let block = unsafe { let block = unsafe {
static mut B: BoxBlock<i32> = BoxBlock::new(); static mut BLOCK: BoxBlock<i32> = BoxBlock::new();
&mut B &mut BLOCK
}; };
P.manage(block); MyBoxPool.manage(block);
let boxed = P.alloc(1).unwrap(); let boxed = MyBoxPool.alloc(1).unwrap();
drop(boxed); drop(boxed);
assert_eq!(2, *P.alloc(2).unwrap()); assert_eq!(2, *MyBoxPool.alloc(2).unwrap());
} }
#[test] #[test]
fn runs_destructor_exactly_once_on_drop() { fn runs_destructor_exactly_once_on_drop() {
static COUNT: AtomicUsize = AtomicUsize::new(0); static COUNT: AtomicUsize = AtomicUsize::new(0);
pub struct S; pub struct MyStruct;
impl Drop for S { impl Drop for MyStruct {
fn drop(&mut self) { fn drop(&mut self) {
COUNT.fetch_add(1, Ordering::Relaxed); COUNT.fetch_add(1, Ordering::Relaxed);
} }
} }
box_pool!(P: S); box_pool!(MyBoxPool: MyStruct);
let block = unsafe { let block = unsafe {
static mut B: BoxBlock<S> = BoxBlock::new(); static mut BLOCK: BoxBlock<MyStruct> = BoxBlock::new();
&mut B &mut BLOCK
}; };
P.manage(block); MyBoxPool.manage(block);
let boxed = P.alloc(S).ok().unwrap(); let boxed = MyBoxPool.alloc(MyStruct).ok().unwrap();
assert_eq!(0, COUNT.load(Ordering::Relaxed)); assert_eq!(0, COUNT.load(Ordering::Relaxed));
@@ -435,15 +436,15 @@ mod tests {
#[repr(align(4096))] #[repr(align(4096))]
pub struct Zst4096; pub struct Zst4096;
box_pool!(P: Zst4096); box_pool!(MyBoxPool: Zst4096);
let block = unsafe { let block = unsafe {
static mut B: BoxBlock<Zst4096> = BoxBlock::new(); static mut BLOCK: BoxBlock<Zst4096> = BoxBlock::new();
&mut B &mut BLOCK
}; };
P.manage(block); MyBoxPool.manage(block);
let boxed = P.alloc(Zst4096).ok().unwrap(); let boxed = MyBoxPool.alloc(Zst4096).ok().unwrap();
let raw = &*boxed as *const Zst4096; let raw = &*boxed as *const Zst4096;
assert_eq!(0, raw as usize % 4096); assert_eq!(0, raw as usize % 4096);
@@ -453,32 +454,32 @@ mod tests {
fn can_clone_if_pool_is_not_exhausted() { fn can_clone_if_pool_is_not_exhausted() {
static STRUCT_CLONE_WAS_CALLED: AtomicBool = AtomicBool::new(false); static STRUCT_CLONE_WAS_CALLED: AtomicBool = AtomicBool::new(false);
pub struct S; pub struct MyStruct;
impl Clone for S { impl Clone for MyStruct {
fn clone(&self) -> Self { fn clone(&self) -> Self {
STRUCT_CLONE_WAS_CALLED.store(true, Ordering::Relaxed); STRUCT_CLONE_WAS_CALLED.store(true, Ordering::Relaxed);
Self Self
} }
} }
box_pool!(P: S); box_pool!(MyBoxPool: MyStruct);
P.manage(unsafe { MyBoxPool.manage(unsafe {
static mut B: BoxBlock<S> = BoxBlock::new(); static mut BLOCK: BoxBlock<MyStruct> = BoxBlock::new();
&mut B &mut BLOCK
}); });
P.manage(unsafe { MyBoxPool.manage(unsafe {
static mut B: BoxBlock<S> = BoxBlock::new(); static mut BLOCK: BoxBlock<MyStruct> = BoxBlock::new();
&mut B &mut BLOCK
}); });
let first = P.alloc(S).ok().unwrap(); let first = MyBoxPool.alloc(MyStruct).ok().unwrap();
let _second = first.clone(); let _second = first.clone();
assert!(STRUCT_CLONE_WAS_CALLED.load(Ordering::Relaxed)); assert!(STRUCT_CLONE_WAS_CALLED.load(Ordering::Relaxed));
let is_oom = P.alloc(S).is_err(); let is_oom = MyBoxPool.alloc(MyStruct).is_err();
assert!(is_oom); assert!(is_oom);
} }
@@ -486,23 +487,23 @@ mod tests {
fn clone_panics_if_pool_exhausted() { fn clone_panics_if_pool_exhausted() {
static STRUCT_CLONE_WAS_CALLED: AtomicBool = AtomicBool::new(false); static STRUCT_CLONE_WAS_CALLED: AtomicBool = AtomicBool::new(false);
pub struct S; pub struct MyStruct;
impl Clone for S { impl Clone for MyStruct {
fn clone(&self) -> Self { fn clone(&self) -> Self {
STRUCT_CLONE_WAS_CALLED.store(true, Ordering::Relaxed); STRUCT_CLONE_WAS_CALLED.store(true, Ordering::Relaxed);
Self Self
} }
} }
box_pool!(P: S); box_pool!(MyBoxPool: MyStruct);
P.manage(unsafe { MyBoxPool.manage(unsafe {
static mut B: BoxBlock<S> = BoxBlock::new(); static mut BLOCK: BoxBlock<MyStruct> = BoxBlock::new();
&mut B &mut BLOCK
}); });
let first = P.alloc(S).ok().unwrap(); let first = MyBoxPool.alloc(MyStruct).ok().unwrap();
let thread = thread::spawn(move || { let thread = thread::spawn(move || {
let _second = first.clone(); let _second = first.clone();
@@ -520,27 +521,27 @@ mod tests {
fn panicking_clone_does_not_leak_memory() { fn panicking_clone_does_not_leak_memory() {
static STRUCT_CLONE_WAS_CALLED: AtomicBool = AtomicBool::new(false); static STRUCT_CLONE_WAS_CALLED: AtomicBool = AtomicBool::new(false);
pub struct S; pub struct MyStruct;
impl Clone for S { impl Clone for MyStruct {
fn clone(&self) -> Self { fn clone(&self) -> Self {
STRUCT_CLONE_WAS_CALLED.store(true, Ordering::Relaxed); STRUCT_CLONE_WAS_CALLED.store(true, Ordering::Relaxed);
panic!() panic!()
} }
} }
box_pool!(P: S); box_pool!(MyBoxPool: MyStruct);
P.manage(unsafe { MyBoxPool.manage(unsafe {
static mut B: BoxBlock<S> = BoxBlock::new(); static mut BLOCK: BoxBlock<MyStruct> = BoxBlock::new();
&mut B &mut BLOCK
}); });
P.manage(unsafe { MyBoxPool.manage(unsafe {
static mut B: BoxBlock<S> = BoxBlock::new(); static mut BLOCK: BoxBlock<MyStruct> = BoxBlock::new();
&mut B &mut BLOCK
}); });
let boxed = P.alloc(S).ok().unwrap(); let boxed = MyBoxPool.alloc(MyStruct).ok().unwrap();
let thread = thread::spawn(move || { let thread = thread::spawn(move || {
let _boxed = boxed.clone(); let _boxed = boxed.clone();
@@ -551,17 +552,10 @@ mod tests {
assert!(STRUCT_CLONE_WAS_CALLED.load(Ordering::Relaxed)); assert!(STRUCT_CLONE_WAS_CALLED.load(Ordering::Relaxed));
let once = P.alloc(S); let once = MyBoxPool.alloc(MyStruct);
let twice = P.alloc(S); let twice = MyBoxPool.alloc(MyStruct);
assert!(once.is_ok()); assert!(once.is_ok());
assert!(twice.is_ok()); assert!(twice.is_ok());
} }
#[test]
fn box_pool_case() {
// https://github.com/rust-embedded/heapless/issues/411
box_pool!(CamelCaseType: u128);
box_pool!(SCREAMING_SNAKE_CASE_TYPE: u128);
}
} }

View File

@@ -5,37 +5,37 @@
//! ``` //! ```
//! use heapless::{object_pool, pool::object::{Object, ObjectBlock}}; //! use heapless::{object_pool, pool::object::{Object, ObjectBlock}};
//! //!
//! object_pool!(P: [u8; 128]); //! object_pool!(MyObjectPool: [u8; 128]);
//! //!
//! // cannot request objects without first giving object blocks to the pool //! // cannot request objects without first giving object blocks to the pool
//! assert!(P.request().is_none()); //! assert!(MyObjectPool.request().is_none());
//! //!
//! // (some `no_std` runtimes have safe APIs to create `&'static mut` references) //! // (some `no_std` runtimes have safe APIs to create `&'static mut` references)
//! let block: &'static mut ObjectBlock<[u8; 128]> = unsafe { //! let block: &'static mut ObjectBlock<[u8; 128]> = unsafe {
//! // unlike the memory pool APIs, an initial value must be specified here //! // unlike the memory pool APIs, an initial value must be specified here
//! static mut B: ObjectBlock<[u8; 128]>= ObjectBlock::new([0; 128]); //! static mut BLOCK: ObjectBlock<[u8; 128]>= ObjectBlock::new([0; 128]);
//! &mut B //! &mut BLOCK
//! }; //! };
//! //!
//! // give object block to the pool //! // give object block to the pool
//! P.manage(block); //! MyObjectPool.manage(block);
//! //!
//! // it's now possible to request objects //! // it's now possible to request objects
//! // unlike the memory pool APIs, no initial value is required here //! // unlike the memory pool APIs, no initial value is required here
//! let mut object = P.request().unwrap(); //! let mut object = MyObjectPool.request().unwrap();
//! //!
//! // mutation is possible //! // mutation is possible
//! object.iter_mut().for_each(|byte| *byte = byte.wrapping_add(1)); //! object.iter_mut().for_each(|byte| *byte = byte.wrapping_add(1));
//! //!
//! // the number of live objects is limited to the number of blocks managed by the pool //! // the number of live objects is limited to the number of blocks managed by the pool
//! let res = P.request(); //! let res = MyObjectPool.request();
//! assert!(res.is_none()); //! assert!(res.is_none());
//! //!
//! // `object`'s destructor returns the object to the pool //! // `object`'s destructor returns the object to the pool
//! drop(object); //! drop(object);
//! //!
//! // it's possible to request an `Object` again //! // it's possible to request an `Object` again
//! let res = P.request(); //! let res = MyObjectPool.request();
//! //!
//! assert!(res.is_some()); //! assert!(res.is_some());
//! ``` //! ```
@@ -48,7 +48,7 @@
//! ``` //! ```
//! use heapless::{object_pool, pool::object::ObjectBlock}; //! use heapless::{object_pool, pool::object::ObjectBlock};
//! //!
//! object_pool!(P: [u8; 128]); //! object_pool!(MyObjectPool: [u8; 128]);
//! //!
//! const POOL_CAPACITY: usize = 8; //! const POOL_CAPACITY: usize = 8;
//! //!
@@ -59,7 +59,7 @@
//! }; //! };
//! //!
//! for block in blocks { //! for block in blocks {
//! P.manage(block); //! MyObjectPool.manage(block);
//! } //! }
//! ``` //! ```
@@ -82,13 +82,14 @@ use super::treiber::{AtomicPtr, NonNullPtr, Stack, StructNode};
#[macro_export] #[macro_export]
macro_rules! object_pool { macro_rules! object_pool {
($name:ident: $data_type:ty) => { ($name:ident: $data_type:ty) => {
#[allow(non_camel_case_types)]
pub struct $name; pub struct $name;
impl $crate::pool::object::ObjectPool for $name { impl $crate::pool::object::ObjectPool for $name {
type Data = $data_type; type Data = $data_type;
fn singleton() -> &'static $crate::pool::object::ObjectPoolImpl<$data_type> { fn singleton() -> &'static $crate::pool::object::ObjectPoolImpl<$data_type> {
// Even though the static variable is not exposed to user code, it is
// still useful to have a descriptive symbol name for debugging.
#[allow(non_upper_case_globals)] #[allow(non_upper_case_globals)]
static $name: $crate::pool::object::ObjectPoolImpl<$data_type> = static $name: $crate::pool::object::ObjectPoolImpl<$data_type> =
$crate::pool::object::ObjectPoolImpl::new(); $crate::pool::object::ObjectPoolImpl::new();
@@ -336,63 +337,63 @@ mod tests {
#[test] #[test]
fn cannot_request_if_empty() { fn cannot_request_if_empty() {
object_pool!(P: i32); object_pool!(MyObjectPool: i32);
assert_eq!(None, P.request()); assert_eq!(None, MyObjectPool.request());
} }
#[test] #[test]
fn can_request_if_manages_one_block() { fn can_request_if_manages_one_block() {
object_pool!(P: i32); object_pool!(MyObjectPool: i32);
let block = unsafe { let block = unsafe {
static mut B: ObjectBlock<i32> = ObjectBlock::new(1); static mut BLOCK: ObjectBlock<i32> = ObjectBlock::new(1);
&mut B &mut BLOCK
}; };
P.manage(block); MyObjectPool.manage(block);
assert_eq!(1, *P.request().unwrap()); assert_eq!(1, *MyObjectPool.request().unwrap());
} }
#[test] #[test]
fn request_drop_request() { fn request_drop_request() {
object_pool!(P: i32); object_pool!(MyObjectPool: i32);
let block = unsafe { let block = unsafe {
static mut B: ObjectBlock<i32> = ObjectBlock::new(1); static mut BLOCK: ObjectBlock<i32> = ObjectBlock::new(1);
&mut B &mut BLOCK
}; };
P.manage(block); MyObjectPool.manage(block);
let mut object = P.request().unwrap(); let mut object = MyObjectPool.request().unwrap();
*object = 2; *object = 2;
drop(object); drop(object);
assert_eq!(2, *P.request().unwrap()); assert_eq!(2, *MyObjectPool.request().unwrap());
} }
#[test] #[test]
fn destructor_does_not_run_on_drop() { fn destructor_does_not_run_on_drop() {
static COUNT: AtomicUsize = AtomicUsize::new(0); static COUNT: AtomicUsize = AtomicUsize::new(0);
pub struct S; pub struct MyStruct;
impl Drop for S { impl Drop for MyStruct {
fn drop(&mut self) { fn drop(&mut self) {
COUNT.fetch_add(1, atomic::Ordering::Relaxed); COUNT.fetch_add(1, atomic::Ordering::Relaxed);
} }
} }
object_pool!(P: S); object_pool!(MyObjectPool: MyStruct);
let block = unsafe { let block = unsafe {
static mut B: ObjectBlock<S> = ObjectBlock::new(S); static mut BLOCK: ObjectBlock<MyStruct> = ObjectBlock::new(MyStruct);
&mut B &mut BLOCK
}; };
P.manage(block); MyObjectPool.manage(block);
let object = P.request().unwrap(); let object = MyObjectPool.request().unwrap();
assert_eq!(0, COUNT.load(atomic::Ordering::Relaxed)); assert_eq!(0, COUNT.load(atomic::Ordering::Relaxed));
@@ -406,24 +407,17 @@ mod tests {
#[repr(align(4096))] #[repr(align(4096))]
pub struct Zst4096; pub struct Zst4096;
object_pool!(P: Zst4096); object_pool!(MyObjectPool: Zst4096);
let block = unsafe { let block = unsafe {
static mut B: ObjectBlock<Zst4096> = ObjectBlock::new(Zst4096); static mut BLOCK: ObjectBlock<Zst4096> = ObjectBlock::new(Zst4096);
&mut B &mut BLOCK
}; };
P.manage(block); MyObjectPool.manage(block);
let object = P.request().unwrap(); let object = MyObjectPool.request().unwrap();
let raw = &*object as *const Zst4096; let raw = &*object as *const Zst4096;
assert_eq!(0, raw as usize % 4096); assert_eq!(0, raw as usize % 4096);
} }
#[test]
fn object_pool_case() {
// https://github.com/rust-embedded/heapless/issues/411
object_pool!(CamelCaseType: u128);
object_pool!(SCREAMING_SNAKE_CASE_TYPE: u128);
}
} }