Merge pull request #456 from reitermarkus/fix-pool-macro-warnings

Fix pool macro warnings.
Authored by Markus Reiter on 2024-02-27 17:19:02 +00:00; committed by GitHub.
commit 483862b3e8
5 changed files with 164 additions and 182 deletions

CHANGELOG.md

@@ -25,7 +25,7 @@ and this project adheres to [Semantic Versioning](http://semver.org/).
 ### Fixed
 - Fixed clippy lints.
-- Fixed `{arc,box,object}_pool!` emitting clippy lints for `CamelCase` and `SNAKE_CASE`.
+- Fixed `{arc,box,object}_pool!` emitting clippy lints.
 - Fixed the list of implemented data structures in the crate docs, by adding `Deque`,
   `HistoryBuffer` and `SortedLinkedList` to the list.

src/pool.rs

@@ -19,13 +19,13 @@
 //!
 //! - test program:
 //!
-//! ``` no_run
+//! ```no_run
 //! use heapless::box_pool;
 //!
-//! box_pool!(P: ()); // or `arc_pool!` or `object_pool!`
+//! box_pool!(MyBoxPool: ()); // or `arc_pool!` or `object_pool!`
 //!
 //! bkpt();
-//! let res = P.alloc(());
+//! let res = MyBoxPool.alloc(());
 //! bkpt();
 //!
 //! if let Ok(boxed) = res {
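
The hunk shows only a fragment of the doc example. For reference, a self-contained sketch of such a debugger test program could look like the following; the `cortex_m`/`cortex_m_rt` scaffolding and overall program shape are assumptions for illustration, not part of this diff.

```rust
#![no_main]
#![no_std]

use cortex_m::asm::bkpt; // assumed breakpoint helper on a Cortex-M target
use heapless::box_pool;
// (a real build also needs a panic handler, e.g. `use panic_halt as _;`)

box_pool!(MyBoxPool: ()); // or `arc_pool!` or `object_pool!`

#[cortex_m_rt::entry]
fn main() -> ! {
    bkpt(); // stop here and inspect the (still empty) pool in the debugger

    let res = MyBoxPool.alloc(());

    bkpt(); // stop again to observe the allocation result

    if let Ok(boxed) = res {
        drop(boxed); // returning the box hands the block back to the pool
    }

    loop {}
}
```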

src/pool/arc.rs

@@ -5,23 +5,23 @@
 //! ```
 //! use heapless::{arc_pool, pool::arc::{Arc, ArcBlock}};
 //!
-//! arc_pool!(P: u128);
+//! arc_pool!(MyArcPool: u128);
 //!
 //! // cannot allocate without first giving memory blocks to the pool
-//! assert!(P.alloc(42).is_err());
+//! assert!(MyArcPool.alloc(42).is_err());
 //!
 //! // (some `no_std` runtimes have safe APIs to create `&'static mut` references)
 //! let block: &'static mut ArcBlock<u128> = unsafe {
-//!     static mut B: ArcBlock<u128> = ArcBlock::new();
-//!     &mut B
+//!     static mut BLOCK: ArcBlock<u128> = ArcBlock::new();
+//!     &mut BLOCK
 //! };
 //!
-//! P.manage(block);
+//! MyArcPool.manage(block);
 //!
-//! let arc = P.alloc(1).unwrap();
+//! let arc = MyArcPool.alloc(1).unwrap();
 //!
 //! // number of smart pointers is limited to the number of blocks managed by the pool
-//! let res = P.alloc(2);
+//! let res = MyArcPool.alloc(2);
 //! assert!(res.is_err());
 //!
 //! // but cloning does not consume an `ArcBlock`
@@ -34,7 +34,7 @@
 //! drop(arc); // release memory
 //!
 //! // it's now possible to allocate a new `Arc` smart pointer
-//! let res = P.alloc(3);
+//! let res = MyArcPool.alloc(3);
 //!
 //! assert!(res.is_ok());
 //! ```
@@ -47,7 +47,7 @@
 //! ```
 //! use heapless::{arc_pool, pool::arc::ArcBlock};
 //!
-//! arc_pool!(P: u128);
+//! arc_pool!(MyArcPool: u128);
 //!
 //! const POOL_CAPACITY: usize = 8;
 //!
@@ -58,7 +58,7 @@
 //! };
 //!
 //! for block in blocks {
-//!     P.manage(block);
+//!     MyArcPool.manage(block);
 //! }
 //! ```
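
Both hunks in this section elide how the `blocks` array is built. A plausible reconstruction, under the assumption that the blocks live in a `static mut` array (the `const` item works around `ArcBlock<u128>` not being `Copy`); the equivalent `box_pool!`/`object_pool!` sections below follow the same pattern:

```rust
use heapless::{arc_pool, pool::arc::ArcBlock};

arc_pool!(MyArcPool: u128);

const POOL_CAPACITY: usize = 8;

fn init() {
    let blocks: &'static mut [ArcBlock<u128>; POOL_CAPACITY] = {
        const BLOCK: ArcBlock<u128> = ArcBlock::new(); // enables array repetition
        static mut BLOCKS: [ArcBlock<u128>; POOL_CAPACITY] = [BLOCK; POOL_CAPACITY];
        unsafe { &mut *core::ptr::addr_of_mut!(BLOCKS) }
    };

    // iterating a `&'static mut` array yields the `&'static mut` references
    // that `manage` requires
    for block in blocks {
        MyArcPool.manage(block);
    }
}
```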
@@ -83,13 +83,14 @@ use super::treiber::{NonNullPtr, Stack, UnionNode};
 #[macro_export]
 macro_rules! arc_pool {
     ($name:ident: $data_type:ty) => {
-        #[allow(non_camel_case_types)]
         pub struct $name;

         impl $crate::pool::arc::ArcPool for $name {
             type Data = $data_type;

             fn singleton() -> &'static $crate::pool::arc::ArcPoolImpl<$data_type> {
+                // Even though the static variable is not exposed to user code, it is
+                // still useful to have a descriptive symbol name for debugging.
                 #[allow(non_upper_case_globals)]
                 static $name: $crate::pool::arc::ArcPoolImpl<$data_type> =
                     $crate::pool::arc::ArcPoolImpl::new();
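
To make the two attribute changes concrete, here is a rough sketch of what `arc_pool!(MyArcPool: u128)` expands to after this commit; the trailing return is assumed from context (it lies outside the hunk), and `box_pool!`/`object_pool!` below follow the same pattern:

```rust
pub struct MyArcPool; // the blanket `#[allow(non_camel_case_types)]` is gone

impl heapless::pool::arc::ArcPool for MyArcPool {
    type Data = u128;

    fn singleton() -> &'static heapless::pool::arc::ArcPoolImpl<u128> {
        // The static deliberately reuses the UpperCamelCase pool name so the
        // symbol is recognizable in a debugger, hence the targeted allow.
        #[allow(non_upper_case_globals)]
        static MyArcPool: heapless::pool::arc::ArcPoolImpl<u128> =
            heapless::pool::arc::ArcPoolImpl::new();

        &MyArcPool // assumed: returns the singleton (not shown in the hunk)
    }
}
```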
@@ -386,67 +387,67 @@ mod tests {
     #[test]
     fn cannot_alloc_if_empty() {
-        arc_pool!(P: i32);
+        arc_pool!(MyArcPool: i32);

-        assert_eq!(Err(42), P.alloc(42),);
+        assert_eq!(Err(42), MyArcPool.alloc(42),);
     }

     #[test]
     fn can_alloc_if_manages_one_block() {
-        arc_pool!(P: i32);
+        arc_pool!(MyArcPool: i32);

         let block = unsafe {
-            static mut B: ArcBlock<i32> = ArcBlock::new();
-            &mut B
+            static mut BLOCK: ArcBlock<i32> = ArcBlock::new();
+            &mut BLOCK
         };
-        P.manage(block);
+        MyArcPool.manage(block);

-        assert_eq!(42, *P.alloc(42).unwrap());
+        assert_eq!(42, *MyArcPool.alloc(42).unwrap());
     }

     #[test]
     fn alloc_drop_alloc() {
-        arc_pool!(P: i32);
+        arc_pool!(MyArcPool: i32);

         let block = unsafe {
-            static mut B: ArcBlock<i32> = ArcBlock::new();
-            &mut B
+            static mut BLOCK: ArcBlock<i32> = ArcBlock::new();
+            &mut BLOCK
         };
-        P.manage(block);
+        MyArcPool.manage(block);

-        let arc = P.alloc(1).unwrap();
+        let arc = MyArcPool.alloc(1).unwrap();
         drop(arc);

-        assert_eq!(2, *P.alloc(2).unwrap());
+        assert_eq!(2, *MyArcPool.alloc(2).unwrap());
     }

     #[test]
     fn strong_count_starts_at_one() {
-        arc_pool!(P: i32);
+        arc_pool!(MyArcPool: i32);

         let block = unsafe {
-            static mut B: ArcBlock<i32> = ArcBlock::new();
-            &mut B
+            static mut BLOCK: ArcBlock<i32> = ArcBlock::new();
+            &mut BLOCK
         };
-        P.manage(block);
+        MyArcPool.manage(block);

-        let arc = P.alloc(1).ok().unwrap();
+        let arc = MyArcPool.alloc(1).ok().unwrap();

         assert_eq!(1, arc.inner().strong.load(Ordering::Relaxed));
     }

     #[test]
     fn clone_increases_strong_count() {
-        arc_pool!(P: i32);
+        arc_pool!(MyArcPool: i32);

         let block = unsafe {
-            static mut B: ArcBlock<i32> = ArcBlock::new();
-            &mut B
+            static mut BLOCK: ArcBlock<i32> = ArcBlock::new();
+            &mut BLOCK
         };
-        P.manage(block);
+        MyArcPool.manage(block);

-        let arc = P.alloc(1).ok().unwrap();
+        let arc = MyArcPool.alloc(1).ok().unwrap();

         let before = arc.inner().strong.load(Ordering::Relaxed);
@@ -459,15 +460,15 @@ mod tests {
     #[test]
     fn drop_decreases_strong_count() {
-        arc_pool!(P: i32);
+        arc_pool!(MyArcPool: i32);

         let block = unsafe {
-            static mut B: ArcBlock<i32> = ArcBlock::new();
-            &mut B
+            static mut BLOCK: ArcBlock<i32> = ArcBlock::new();
+            &mut BLOCK
         };
-        P.manage(block);
+        MyArcPool.manage(block);

-        let arc = P.alloc(1).ok().unwrap();
+        let arc = MyArcPool.alloc(1).ok().unwrap();
         let arc2 = arc.clone();

         let before = arc.inner().strong.load(Ordering::Relaxed);
@@ -482,23 +483,23 @@ mod tests {
     fn runs_destructor_exactly_once_when_strong_count_reaches_zero() {
         static COUNT: AtomicUsize = AtomicUsize::new(0);

-        pub struct S;
+        pub struct MyStruct;

-        impl Drop for S {
+        impl Drop for MyStruct {
             fn drop(&mut self) {
                 COUNT.fetch_add(1, Ordering::Relaxed);
             }
         }

-        arc_pool!(P: S);
+        arc_pool!(MyArcPool: MyStruct);

         let block = unsafe {
-            static mut B: ArcBlock<S> = ArcBlock::new();
-            &mut B
+            static mut BLOCK: ArcBlock<MyStruct> = ArcBlock::new();
+            &mut BLOCK
         };
-        P.manage(block);
+        MyArcPool.manage(block);

-        let arc = P.alloc(S).ok().unwrap();
+        let arc = MyArcPool.alloc(MyStruct).ok().unwrap();

         assert_eq!(0, COUNT.load(Ordering::Relaxed));
@@ -512,24 +513,17 @@ mod tests {
         #[repr(align(4096))]
         pub struct Zst4096;

-        arc_pool!(P: Zst4096);
+        arc_pool!(MyArcPool: Zst4096);

         let block = unsafe {
-            static mut B: ArcBlock<Zst4096> = ArcBlock::new();
-            &mut B
+            static mut BLOCK: ArcBlock<Zst4096> = ArcBlock::new();
+            &mut BLOCK
         };
-        P.manage(block);
+        MyArcPool.manage(block);

-        let arc = P.alloc(Zst4096).ok().unwrap();
+        let arc = MyArcPool.alloc(Zst4096).ok().unwrap();

         let raw = &*arc as *const Zst4096;
         assert_eq!(0, raw as usize % 4096);
     }
-
-    #[test]
-    fn arc_pool_case() {
-        // https://github.com/rust-embedded/heapless/issues/411
-        arc_pool!(CamelCaseType: u128);
-        arc_pool!(SCREAMING_SNAKE_CASE_TYPE: u128);
-    }
 }
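
The deleted `arc_pool_case` test (and its `box_pool!`/`object_pool!` twins below) existed to pin down the behavior from issue #411. With `#[allow(non_camel_case_types)]` removed from the generated struct, such invocations would warn again, so the tests no longer compile cleanly. Hypothetical user code, not from the diff, illustrating the consequence:

```rust
use heapless::arc_pool;

// Conventional UpperCamelCase name: expands cleanly, no naming lints.
arc_pool!(MyArcPool: u128);

// After this change, non-UpperCamelCase names trigger the standard lints
// again (e.g. `non_camel_case_types` on the generated `pub struct`):
// arc_pool!(SCREAMING_SNAKE_CASE_TYPE: u128);
// arc_pool!(snake_case_type: u128);
```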

src/pool/boxed.rs

@@ -5,35 +5,35 @@
 //! ```
 //! use heapless::{box_pool, pool::boxed::{Box, BoxBlock}};
 //!
-//! box_pool!(P: u128);
+//! box_pool!(MyBoxPool: u128);
 //!
 //! // cannot allocate without first giving memory blocks to the pool
-//! assert!(P.alloc(42).is_err());
+//! assert!(MyBoxPool.alloc(42).is_err());
 //!
 //! // (some `no_std` runtimes have safe APIs to create `&'static mut` references)
 //! let block: &'static mut BoxBlock<u128> = unsafe {
-//!     static mut B: BoxBlock<u128> = BoxBlock::new();
-//!     &mut B
+//!     static mut BLOCK: BoxBlock<u128> = BoxBlock::new();
+//!     &mut BLOCK
 //! };
 //!
 //! // give block of memory to the pool
-//! P.manage(block);
+//! MyBoxPool.manage(block);
 //!
 //! // it's now possible to allocate
-//! let mut boxed = P.alloc(1).unwrap();
+//! let mut boxed = MyBoxPool.alloc(1).unwrap();
 //!
 //! // mutation is possible
 //! *boxed += 1;
 //! assert_eq!(2, *boxed);
 //!
 //! // number of boxes is limited to the number of blocks managed by the pool
-//! let res = P.alloc(3);
+//! let res = MyBoxPool.alloc(3);
 //! assert!(res.is_err());
 //!
 //! // give another memory block to the pool
-//! P.manage(unsafe {
-//!     static mut B: BoxBlock<u128> = BoxBlock::new();
-//!     &mut B
+//! MyBoxPool.manage(unsafe {
+//!     static mut BLOCK: BoxBlock<u128> = BoxBlock::new();
+//!     &mut BLOCK
 //! });
 //!
 //! // cloning also consumes a memory block from the pool
@@ -42,14 +42,14 @@
 //! assert_eq!(3, *separate_box);
 //!
 //! // after the clone it's not possible to allocate again
-//! let res = P.alloc(4);
+//! let res = MyBoxPool.alloc(4);
 //! assert!(res.is_err());
 //!
 //! // `boxed`'s destructor returns the memory block to the pool
 //! drop(boxed);
 //!
 //! // it's possible to allocate again
-//! let res = P.alloc(5);
+//! let res = MyBoxPool.alloc(5);
 //!
 //! assert!(res.is_ok());
 //! ```
@@ -62,7 +62,7 @@
 //! ```
 //! use heapless::{box_pool, pool::boxed::BoxBlock};
 //!
-//! box_pool!(P: u128);
+//! box_pool!(MyBoxPool: u128);
 //!
 //! const POOL_CAPACITY: usize = 8;
 //!
@@ -74,7 +74,7 @@
 //! };
 //!
 //! for block in blocks {
-//!     P.manage(block);
+//!     MyBoxPool.manage(block);
 //! }
 //! ```
@@ -95,13 +95,14 @@ use super::treiber::{NonNullPtr, Stack, UnionNode};
 #[macro_export]
 macro_rules! box_pool {
     ($name:ident: $data_type:ty) => {
-        #[allow(non_camel_case_types)]
         pub struct $name;

         impl $crate::pool::boxed::BoxPool for $name {
             type Data = $data_type;

             fn singleton() -> &'static $crate::pool::boxed::BoxPoolImpl<$data_type> {
+                // Even though the static variable is not exposed to user code, it is
+                // still useful to have a descriptive symbol name for debugging.
                 #[allow(non_upper_case_globals)]
                 static $name: $crate::pool::boxed::BoxPoolImpl<$data_type> =
                     $crate::pool::boxed::BoxPoolImpl::new();
@@ -366,62 +367,62 @@ mod tests {
     #[test]
     fn cannot_alloc_if_empty() {
-        box_pool!(P: i32);
+        box_pool!(MyBoxPool: i32);

-        assert_eq!(Err(42), P.alloc(42));
+        assert_eq!(Err(42), MyBoxPool.alloc(42));
     }

     #[test]
     fn can_alloc_if_pool_manages_one_block() {
-        box_pool!(P: i32);
+        box_pool!(MyBoxPool: i32);

         let block = unsafe {
-            static mut B: BoxBlock<i32> = BoxBlock::new();
-            &mut B
+            static mut BLOCK: BoxBlock<i32> = BoxBlock::new();
+            &mut BLOCK
         };
-        P.manage(block);
+        MyBoxPool.manage(block);

-        assert_eq!(42, *P.alloc(42).unwrap());
+        assert_eq!(42, *MyBoxPool.alloc(42).unwrap());
     }

     #[test]
     fn alloc_drop_alloc() {
-        box_pool!(P: i32);
+        box_pool!(MyBoxPool: i32);

         let block = unsafe {
-            static mut B: BoxBlock<i32> = BoxBlock::new();
-            &mut B
+            static mut BLOCK: BoxBlock<i32> = BoxBlock::new();
+            &mut BLOCK
         };
-        P.manage(block);
+        MyBoxPool.manage(block);

-        let boxed = P.alloc(1).unwrap();
+        let boxed = MyBoxPool.alloc(1).unwrap();
         drop(boxed);

-        assert_eq!(2, *P.alloc(2).unwrap());
+        assert_eq!(2, *MyBoxPool.alloc(2).unwrap());
     }

     #[test]
     fn runs_destructor_exactly_once_on_drop() {
         static COUNT: AtomicUsize = AtomicUsize::new(0);

-        pub struct S;
+        pub struct MyStruct;

-        impl Drop for S {
+        impl Drop for MyStruct {
             fn drop(&mut self) {
                 COUNT.fetch_add(1, Ordering::Relaxed);
             }
         }

-        box_pool!(P: S);
+        box_pool!(MyBoxPool: MyStruct);

         let block = unsafe {
-            static mut B: BoxBlock<S> = BoxBlock::new();
-            &mut B
+            static mut BLOCK: BoxBlock<MyStruct> = BoxBlock::new();
+            &mut BLOCK
         };
-        P.manage(block);
+        MyBoxPool.manage(block);

-        let boxed = P.alloc(S).ok().unwrap();
+        let boxed = MyBoxPool.alloc(MyStruct).ok().unwrap();

         assert_eq!(0, COUNT.load(Ordering::Relaxed));
@@ -435,15 +436,15 @@ mod tests {
         #[repr(align(4096))]
         pub struct Zst4096;

-        box_pool!(P: Zst4096);
+        box_pool!(MyBoxPool: Zst4096);

         let block = unsafe {
-            static mut B: BoxBlock<Zst4096> = BoxBlock::new();
-            &mut B
+            static mut BLOCK: BoxBlock<Zst4096> = BoxBlock::new();
+            &mut BLOCK
         };
-        P.manage(block);
+        MyBoxPool.manage(block);

-        let boxed = P.alloc(Zst4096).ok().unwrap();
+        let boxed = MyBoxPool.alloc(Zst4096).ok().unwrap();

         let raw = &*boxed as *const Zst4096;
         assert_eq!(0, raw as usize % 4096);
@@ -453,32 +454,32 @@ mod tests {
     fn can_clone_if_pool_is_not_exhausted() {
         static STRUCT_CLONE_WAS_CALLED: AtomicBool = AtomicBool::new(false);

-        pub struct S;
+        pub struct MyStruct;

-        impl Clone for S {
+        impl Clone for MyStruct {
             fn clone(&self) -> Self {
                 STRUCT_CLONE_WAS_CALLED.store(true, Ordering::Relaxed);
                 Self
             }
         }

-        box_pool!(P: S);
+        box_pool!(MyBoxPool: MyStruct);

-        P.manage(unsafe {
-            static mut B: BoxBlock<S> = BoxBlock::new();
-            &mut B
+        MyBoxPool.manage(unsafe {
+            static mut BLOCK: BoxBlock<MyStruct> = BoxBlock::new();
+            &mut BLOCK
         });
-        P.manage(unsafe {
-            static mut B: BoxBlock<S> = BoxBlock::new();
-            &mut B
+        MyBoxPool.manage(unsafe {
+            static mut BLOCK: BoxBlock<MyStruct> = BoxBlock::new();
+            &mut BLOCK
         });

-        let first = P.alloc(S).ok().unwrap();
+        let first = MyBoxPool.alloc(MyStruct).ok().unwrap();
         let _second = first.clone();

         assert!(STRUCT_CLONE_WAS_CALLED.load(Ordering::Relaxed));

-        let is_oom = P.alloc(S).is_err();
+        let is_oom = MyBoxPool.alloc(MyStruct).is_err();
         assert!(is_oom);
     }
@@ -486,23 +487,23 @@ mod tests {
     fn clone_panics_if_pool_exhausted() {
         static STRUCT_CLONE_WAS_CALLED: AtomicBool = AtomicBool::new(false);

-        pub struct S;
+        pub struct MyStruct;

-        impl Clone for S {
+        impl Clone for MyStruct {
             fn clone(&self) -> Self {
                 STRUCT_CLONE_WAS_CALLED.store(true, Ordering::Relaxed);
                 Self
             }
         }

-        box_pool!(P: S);
+        box_pool!(MyBoxPool: MyStruct);

-        P.manage(unsafe {
-            static mut B: BoxBlock<S> = BoxBlock::new();
-            &mut B
+        MyBoxPool.manage(unsafe {
+            static mut BLOCK: BoxBlock<MyStruct> = BoxBlock::new();
+            &mut BLOCK
         });

-        let first = P.alloc(S).ok().unwrap();
+        let first = MyBoxPool.alloc(MyStruct).ok().unwrap();

         let thread = thread::spawn(move || {
             let _second = first.clone();
@@ -520,27 +521,27 @@ mod tests {
     fn panicking_clone_does_not_leak_memory() {
         static STRUCT_CLONE_WAS_CALLED: AtomicBool = AtomicBool::new(false);

-        pub struct S;
+        pub struct MyStruct;

-        impl Clone for S {
+        impl Clone for MyStruct {
             fn clone(&self) -> Self {
                 STRUCT_CLONE_WAS_CALLED.store(true, Ordering::Relaxed);
                 panic!()
             }
         }

-        box_pool!(P: S);
+        box_pool!(MyBoxPool: MyStruct);

-        P.manage(unsafe {
-            static mut B: BoxBlock<S> = BoxBlock::new();
-            &mut B
+        MyBoxPool.manage(unsafe {
+            static mut BLOCK: BoxBlock<MyStruct> = BoxBlock::new();
+            &mut BLOCK
         });
-        P.manage(unsafe {
-            static mut B: BoxBlock<S> = BoxBlock::new();
-            &mut B
+        MyBoxPool.manage(unsafe {
+            static mut BLOCK: BoxBlock<MyStruct> = BoxBlock::new();
+            &mut BLOCK
         });

-        let boxed = P.alloc(S).ok().unwrap();
+        let boxed = MyBoxPool.alloc(MyStruct).ok().unwrap();

         let thread = thread::spawn(move || {
             let _boxed = boxed.clone();
@@ -551,17 +552,10 @@ mod tests {
         assert!(STRUCT_CLONE_WAS_CALLED.load(Ordering::Relaxed));

-        let once = P.alloc(S);
-        let twice = P.alloc(S);
+        let once = MyBoxPool.alloc(MyStruct);
+        let twice = MyBoxPool.alloc(MyStruct);

         assert!(once.is_ok());
         assert!(twice.is_ok());
     }
-
-    #[test]
-    fn box_pool_case() {
-        // https://github.com/rust-embedded/heapless/issues/411
-        box_pool!(CamelCaseType: u128);
-        box_pool!(SCREAMING_SNAKE_CASE_TYPE: u128);
-    }
 }
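
The three clone tests above pin down an API property worth spelling out: unlike `alloc`, which returns the value on failure, `Box::clone` has no way to report exhaustion, so it needs a free block and panics without one. A short sketch of that behavior, with a hypothetical pool name, in a hosted-test setting:

```rust
use heapless::{box_pool, pool::boxed::BoxBlock};

box_pool!(DemoPool: u32); // hypothetical name for illustration

fn demo() {
    // hand the pool exactly one block
    DemoPool.manage(unsafe {
        static mut BLOCK: BoxBlock<u32> = BoxBlock::new();
        &mut BLOCK
    });

    let first = DemoPool.alloc(1).unwrap(); // consumes the only block

    // `first.clone()` would panic here: the clone needs a second block.
    // After dropping `first`, its block is returned and allocation succeeds:
    drop(first);
    assert!(DemoPool.alloc(2).is_ok());
}
```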

src/pool/object.rs

@@ -5,37 +5,37 @@
 //! ```
 //! use heapless::{object_pool, pool::object::{Object, ObjectBlock}};
 //!
-//! object_pool!(P: [u8; 128]);
+//! object_pool!(MyObjectPool: [u8; 128]);
 //!
 //! // cannot request objects without first giving object blocks to the pool
-//! assert!(P.request().is_none());
+//! assert!(MyObjectPool.request().is_none());
 //!
 //! // (some `no_std` runtimes have safe APIs to create `&'static mut` references)
 //! let block: &'static mut ObjectBlock<[u8; 128]> = unsafe {
 //!     // unlike the memory pool APIs, an initial value must be specified here
-//!     static mut B: ObjectBlock<[u8; 128]> = ObjectBlock::new([0; 128]);
-//!     &mut B
+//!     static mut BLOCK: ObjectBlock<[u8; 128]> = ObjectBlock::new([0; 128]);
+//!     &mut BLOCK
 //! };
 //!
 //! // give object block to the pool
-//! P.manage(block);
+//! MyObjectPool.manage(block);
 //!
 //! // it's now possible to request objects
 //! // unlike the memory pool APIs, no initial value is required here
-//! let mut object = P.request().unwrap();
+//! let mut object = MyObjectPool.request().unwrap();
 //!
 //! // mutation is possible
 //! object.iter_mut().for_each(|byte| *byte = byte.wrapping_add(1));
 //!
 //! // the number of live objects is limited to the number of blocks managed by the pool
-//! let res = P.request();
+//! let res = MyObjectPool.request();
 //! assert!(res.is_none());
 //!
 //! // `object`'s destructor returns the object to the pool
 //! drop(object);
 //!
 //! // it's possible to request an `Object` again
-//! let res = P.request();
+//! let res = MyObjectPool.request();
 //!
 //! assert!(res.is_some());
 //! ```
@@ -48,7 +48,7 @@
 //! ```
 //! use heapless::{object_pool, pool::object::ObjectBlock};
 //!
-//! object_pool!(P: [u8; 128]);
+//! object_pool!(MyObjectPool: [u8; 128]);
 //!
 //! const POOL_CAPACITY: usize = 8;
 //!
@@ -59,7 +59,7 @@
 //! };
 //!
 //! for block in blocks {
-//!     P.manage(block);
+//!     MyObjectPool.manage(block);
 //! }
 //! ```
@@ -82,13 +82,14 @@ use super::treiber::{AtomicPtr, NonNullPtr, Stack, StructNode};
 #[macro_export]
 macro_rules! object_pool {
     ($name:ident: $data_type:ty) => {
-        #[allow(non_camel_case_types)]
        pub struct $name;

         impl $crate::pool::object::ObjectPool for $name {
             type Data = $data_type;

             fn singleton() -> &'static $crate::pool::object::ObjectPoolImpl<$data_type> {
+                // Even though the static variable is not exposed to user code, it is
+                // still useful to have a descriptive symbol name for debugging.
                 #[allow(non_upper_case_globals)]
                 static $name: $crate::pool::object::ObjectPoolImpl<$data_type> =
                     $crate::pool::object::ObjectPoolImpl::new();
@@ -336,63 +337,63 @@ mod tests {
     #[test]
     fn cannot_request_if_empty() {
-        object_pool!(P: i32);
+        object_pool!(MyObjectPool: i32);

-        assert_eq!(None, P.request());
+        assert_eq!(None, MyObjectPool.request());
     }

     #[test]
     fn can_request_if_manages_one_block() {
-        object_pool!(P: i32);
+        object_pool!(MyObjectPool: i32);

         let block = unsafe {
-            static mut B: ObjectBlock<i32> = ObjectBlock::new(1);
-            &mut B
+            static mut BLOCK: ObjectBlock<i32> = ObjectBlock::new(1);
+            &mut BLOCK
         };
-        P.manage(block);
+        MyObjectPool.manage(block);

-        assert_eq!(1, *P.request().unwrap());
+        assert_eq!(1, *MyObjectPool.request().unwrap());
     }

     #[test]
     fn request_drop_request() {
-        object_pool!(P: i32);
+        object_pool!(MyObjectPool: i32);

         let block = unsafe {
-            static mut B: ObjectBlock<i32> = ObjectBlock::new(1);
-            &mut B
+            static mut BLOCK: ObjectBlock<i32> = ObjectBlock::new(1);
+            &mut BLOCK
         };
-        P.manage(block);
+        MyObjectPool.manage(block);

-        let mut object = P.request().unwrap();
+        let mut object = MyObjectPool.request().unwrap();

         *object = 2;
         drop(object);

-        assert_eq!(2, *P.request().unwrap());
+        assert_eq!(2, *MyObjectPool.request().unwrap());
     }

     #[test]
     fn destructor_does_not_run_on_drop() {
         static COUNT: AtomicUsize = AtomicUsize::new(0);

-        pub struct S;
+        pub struct MyStruct;

-        impl Drop for S {
+        impl Drop for MyStruct {
             fn drop(&mut self) {
                 COUNT.fetch_add(1, atomic::Ordering::Relaxed);
             }
         }

-        object_pool!(P: S);
+        object_pool!(MyObjectPool: MyStruct);

         let block = unsafe {
-            static mut B: ObjectBlock<S> = ObjectBlock::new(S);
-            &mut B
+            static mut BLOCK: ObjectBlock<MyStruct> = ObjectBlock::new(MyStruct);
+            &mut BLOCK
         };
-        P.manage(block);
+        MyObjectPool.manage(block);

-        let object = P.request().unwrap();
+        let object = MyObjectPool.request().unwrap();

         assert_eq!(0, COUNT.load(atomic::Ordering::Relaxed));
@@ -406,24 +407,17 @@ mod tests {
         #[repr(align(4096))]
         pub struct Zst4096;

-        object_pool!(P: Zst4096);
+        object_pool!(MyObjectPool: Zst4096);

         let block = unsafe {
-            static mut B: ObjectBlock<Zst4096> = ObjectBlock::new(Zst4096);
-            &mut B
+            static mut BLOCK: ObjectBlock<Zst4096> = ObjectBlock::new(Zst4096);
+            &mut BLOCK
         };
-        P.manage(block);
+        MyObjectPool.manage(block);

-        let object = P.request().unwrap();
+        let object = MyObjectPool.request().unwrap();

         let raw = &*object as *const Zst4096;
         assert_eq!(0, raw as usize % 4096);
     }
-
-    #[test]
-    fn object_pool_case() {
-        // https://github.com/rust-embedded/heapless/issues/411
-        object_pool!(CamelCaseType: u128);
-        object_pool!(SCREAMING_SNAKE_CASE_TYPE: u128);
-    }
 }
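
The `destructor_does_not_run_on_drop` test above captures the key difference from `box_pool!`/`arc_pool!`: returning an `Object` to the pool does not drop its value; the value is recycled as-is for the next `request`. A short sketch of that behavior, with a hypothetical pool name:

```rust
use heapless::{object_pool, pool::object::ObjectBlock};

object_pool!(ScratchPool: [u8; 4]); // hypothetical name for illustration

fn demo() {
    ScratchPool.manage(unsafe {
        static mut BLOCK: ObjectBlock<[u8; 4]> = ObjectBlock::new([0; 4]);
        &mut BLOCK
    });

    let mut object = ScratchPool.request().unwrap();
    object[0] = 42;
    drop(object); // returns the object to the pool; the value is NOT dropped

    // the next request sees the previous contents, not a fresh [0; 4]
    assert_eq!(42, ScratchPool.request().unwrap()[0]);
}
```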