Use TLSF by default (#4130)

* Use TLSF by default

* Update example heap usage to leave more stack

* Fix test configurations
This commit is contained in:
Dániel Buga 2025-09-17 16:03:32 +02:00 committed by GitHub
parent acf3327fa3
commit 42297811be
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
17 changed files with 254 additions and 79 deletions

View File

@ -11,10 +11,12 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
- Added chip-selection features (#4023)
- New default feature (`compat`) enables implementations for `malloc`, `free`, `calloc`, `realloc` and others (#3890, #4043)
- `ESP_ALLOC_CONFIG_HEAP_ALGORITHM` to select the global heap algorithm (#4130)
### Changed
- Make stats structs fields public (#3828)
- The default heap allocator is now TLSF, implemented by the `rlsf` crate (#3950)
### Fixed

View File

@ -24,9 +24,14 @@ defmt = { version = "1.0.1", optional = true }
cfg-if = "1.0.0"
enumset = "1.1.6"
esp-sync = { version = "0.0.0", path = "../esp-sync" }
linked_list_allocator = { version = "0.10.5", default-features = false, features = ["const_mut_refs"] }
document-features = "0.2.11"
linked_list_allocator = { version = "0.10.5", default-features = false, features = ["const_mut_refs"] }
rlsf = { version = "0.2", features = ["unstable"] }
[build-dependencies]
esp-config = { version = "0.5.0", path = "../esp-config", features = ["build"] }
[features]
default = ["compat"]

9
esp-alloc/build.rs Normal file
View File

@ -0,0 +1,9 @@
use esp_config::generate_config_from_yaml_definition;
/// Build script: turns `esp_config.yml` into the crate's compile-time
/// configuration (cfg symbols) via `esp-config`.
fn main() {
    // Rebuild whenever the configuration definition file changes.
    println!("cargo:rerun-if-changed=./esp_config.yml");

    // Read the YAML definition and let esp-config emit the config for
    // this crate; any failure here is a build-environment error.
    let yaml = std::fs::read_to_string("./esp_config.yml")
        .expect("Failed to read esp_config.yml for esp-alloc");
    generate_config_from_yaml_definition(&yaml, true, true, None).unwrap();
}

14
esp-alloc/esp_config.yml Normal file
View File

@ -0,0 +1,14 @@
crate: esp-alloc
options:
- name: heap_algorithm
description: "The heap algorithm to use. TLSF offers higher performance
and bounded allocation time, but uses more memory."
default:
- value: '"TLSF"'
constraints:
- type:
validator: enumeration
value:
- "LLFF"
- "TLSF"

View File

@ -0,0 +1,40 @@
use core::{alloc::Layout, ptr::NonNull};
use linked_list_allocator::Heap;
/// Linked-list first-fit heap, backed by the `linked_list_allocator` crate.
pub(crate) struct LlffHeap {
    heap: Heap,
}

impl LlffHeap {
    /// Creates a heap managing the `size`-byte region starting at
    /// `heap_bottom`.
    ///
    /// # Safety
    ///
    /// The caller must hand over exclusive ownership of a valid memory
    /// region of at least `size` bytes at `heap_bottom`.
    pub unsafe fn new(heap_bottom: *mut u8, size: usize) -> Self {
        let mut inner = Heap::empty();
        unsafe { inner.init(heap_bottom, size) };
        Self { heap: inner }
    }

    /// Total capacity of the heap in bytes.
    pub fn size(&self) -> usize {
        self.heap.size()
    }

    /// Bytes currently in use.
    pub fn used(&self) -> usize {
        self.heap.used()
    }

    /// Bytes currently available.
    pub fn free(&self) -> usize {
        self.heap.free()
    }

    /// Attempts to satisfy `layout`; returns `None` when no suitable free
    /// block exists.
    pub fn allocate(&mut self, layout: Layout) -> Option<NonNull<u8>> {
        self.heap.allocate_first_fit(layout).ok()
    }

    /// Frees `ptr` if it lies within this heap's managed range. Returns
    /// whether the pointer belonged to this heap.
    pub(crate) unsafe fn try_deallocate(&mut self, ptr: NonNull<u8>, layout: Layout) -> bool {
        let p = ptr.as_ptr();
        let owned = self.heap.bottom() <= p && p <= self.heap.top();
        if owned {
            // SAFETY: the caller guarantees `ptr`/`layout` describe an
            // allocation previously returned by `allocate` on this heap.
            unsafe { self.heap.deallocate(ptr, layout) };
        }
        owned
    }
}

View File

@ -0,0 +1,9 @@
// Heap-algorithm selection. Exactly one of the `heap_algorithm_*` cfg
// symbols is expected to be active, driven by the
// `ESP_ALLOC_CONFIG_HEAP_ALGORITHM` option defined in `esp_config.yml`
// ("LLFF" or "TLSF").
#[cfg(heap_algorithm_llff)]
mod llff;
#[cfg(heap_algorithm_tlsf)]
mod tlsf;

// Re-export the selected implementation under a single `Heap` name so the
// rest of the crate is agnostic to which algorithm was chosen.
#[cfg(heap_algorithm_llff)]
pub(crate) use llff::LlffHeap as Heap;
#[cfg(heap_algorithm_tlsf)]
pub(crate) use tlsf::TlsfHeap as Heap;

View File

@ -0,0 +1,69 @@
use core::{alloc::Layout, ptr::NonNull};
use rlsf::Tlsf;
// TODO: make this configurable
// Two-level segregated-fit allocator with `usize::BITS` first- and
// second-level classes.
type Heap = Tlsf<'static, usize, usize, { usize::BITS as usize }, { usize::BITS as usize }>;

/// TLSF heap, backed by the `rlsf` crate.
pub(crate) struct TlsfHeap {
heap: Heap,
// Start address of the managed pool (inclusive).
pool_start: usize,
// End address of the managed pool (exclusive; see `try_deallocate`).
pool_end: usize,
}

impl TlsfHeap {
/// Creates a heap managing the `size`-byte region starting at
/// `heap_bottom`.
///
/// # Safety
///
/// The caller must hand over exclusive ownership of a valid memory
/// region of at least `size` bytes at `heap_bottom`, valid for the
/// lifetime of the heap.
pub unsafe fn new(heap_bottom: *mut u8, size: usize) -> Self {
let mut heap = Heap::new();
let block = unsafe { core::slice::from_raw_parts(heap_bottom, size) };
// The allocator reports how many bytes of the block it actually
// manages; the pool bounds are derived from that, not from `size`.
let actual_size = unsafe { heap.insert_free_block_ptr(block.into()).unwrap() };
Self {
heap,
pool_start: heap_bottom as usize,
pool_end: heap_bottom as usize + actual_size.get(),
}
}

/// Total capacity of the heap (the managed pool size) in bytes.
pub fn size(&self) -> usize {
self.pool_end - self.pool_start
}

/// Bytes currently in use, computed by walking every block in the pool
/// and summing the sizes of occupied ones.
pub fn used(&self) -> usize {
let mut used = 0;
let pool =
unsafe { core::slice::from_raw_parts(self.pool_start as *const u8, self.size()) };
// SAFETY: `pool` covers exactly the region registered with
// `insert_free_block_ptr` in `new`.
for block in unsafe { self.heap.iter_blocks(NonNull::from(pool)) } {
if block.is_occupied() {
used += block.size();
}
}
used
}

/// Bytes currently available, computed by walking every block in the
/// pool and summing the payload capacity of free ones.
///
/// Note: this sums `max_payload_size()` while `used` sums `size()`, so
/// `used() + free()` need not equal `size()`.
pub fn free(&self) -> usize {
let mut free = 0;
let pool =
unsafe { core::slice::from_raw_parts(self.pool_start as *const u8, self.size()) };
// SAFETY: `pool` covers exactly the region registered with
// `insert_free_block_ptr` in `new`.
for block in unsafe { self.heap.iter_blocks(NonNull::from(pool)) } {
if !block.is_occupied() {
free += block.max_payload_size();
}
}
free
}

/// Attempts to satisfy `layout`; returns `None` when no suitable free
/// block exists.
pub fn allocate(&mut self, layout: Layout) -> Option<NonNull<u8>> {
self.heap.allocate(layout)
}

/// Frees `ptr` if it lies within this heap's pool. Returns whether the
/// pointer belonged to this heap.
pub(crate) unsafe fn try_deallocate(&mut self, ptr: NonNull<u8>, layout: Layout) -> bool {
let addr = ptr.addr().get();
// Half-open ownership check: the pool spans [pool_start, pool_end).
if self.pool_start <= addr && self.pool_end > addr {
// SAFETY: the caller guarantees `ptr` was returned by `allocate`
// on this heap with a layout of the same alignment.
unsafe { self.heap.deallocate(ptr, layout.align()) };
true
} else {
false
}
}
}

View File

@ -143,6 +143,7 @@
#![doc(html_logo_url = "https://avatars.githubusercontent.com/u/46717278")]
mod allocators;
mod heap;
mod macros;
#[cfg(feature = "compat")]
mod malloc;
@ -156,7 +157,8 @@ use core::{
pub use allocators::*;
use enumset::{EnumSet, EnumSetType};
use esp_sync::NonReentrantMutex;
use linked_list_allocator::Heap;
use crate::heap::Heap;
/// The global allocator instance
#[global_allocator]
@ -275,23 +277,41 @@ impl HeapRegion {
size: usize,
capabilities: EnumSet<MemoryCapability>,
) -> Self {
unsafe {
let mut heap = Heap::empty();
heap.init(heap_bottom, size);
Self { heap, capabilities }
Self {
heap: unsafe { Heap::new(heap_bottom, size) },
capabilities,
}
}
/// Return stats for the current memory region
pub fn stats(&self) -> RegionStats {
RegionStats {
size: self.heap.size(),
used: self.heap.used(),
free: self.heap.free(),
size: self.size(),
used: self.used(),
free: self.free(),
capabilities: self.capabilities,
}
}
/// Total capacity of this region's heap in bytes.
fn size(&self) -> usize {
self.heap.size()
}
/// Bytes currently in use in this region.
fn used(&self) -> usize {
self.heap.used()
}
/// Bytes currently available in this region.
fn free(&self) -> usize {
self.heap.free()
}
/// Attempts to allocate `layout` from this region; `None` on failure.
fn allocate(&mut self, layout: Layout) -> Option<NonNull<u8>> {
self.heap.allocate(layout)
}
/// Frees `ptr` if it belongs to this region; returns whether it did.
///
/// # Safety
///
/// `ptr`/`layout` must describe an allocation previously returned by
/// `allocate` on one of the heap regions.
unsafe fn try_deallocate(&mut self, ptr: NonNull<u8>, layout: Layout) -> bool {
unsafe { self.heap.try_deallocate(ptr, layout) }
}
}
/// Stats for a heap allocator
@ -503,44 +523,34 @@ impl EspHeapInner {
) -> *mut u8 {
#[cfg(feature = "internal-heap-stats")]
let before = self.used();
let mut iter = self
.heap
.iter_mut()
.filter_map(|region| region.as_mut())
.filter(|region| region.capabilities.is_superset(capabilities));
let mut iter = self.heap.iter_mut().filter(|region| {
if region.is_some() {
region
.as_ref()
.unwrap()
.capabilities
.is_superset(capabilities)
} else {
false
}
});
let allocation = loop {
let Some(region) = iter.next() else {
return ptr::null_mut();
};
let res = loop {
if let Some(Some(region)) = iter.next() {
let res = region.heap.allocate_first_fit(layout);
if let Ok(res) = res {
break Some(res);
}
} else {
break None;
if let Some(res) = region.allocate(layout) {
break res;
}
};
res.map_or(ptr::null_mut(), |allocation| {
#[cfg(feature = "internal-heap-stats")]
{
// We need to call used because [linked_list_allocator::Heap] does internal size
// alignment so we cannot use the size provided by the layout.
let used = self.used();
#[cfg(feature = "internal-heap-stats")]
{
// We need to call used because the heap impls have some internal overhead
// so we cannot use the size provided by the layout.
let used = self.used();
self.internal_heap_stats.total_allocated += used - before;
self.internal_heap_stats.max_usage =
core::cmp::max(self.internal_heap_stats.max_usage, used);
}
self.internal_heap_stats.total_allocated += used - before;
self.internal_heap_stats.max_usage =
core::cmp::max(self.internal_heap_stats.max_usage, used);
}
allocation.as_ptr()
})
allocation.as_ptr()
}
}
@ -637,9 +647,9 @@ unsafe impl GlobalAlloc for EspHeap {
}
unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
if ptr.is_null() {
let Some(ptr) = NonNull::new(ptr) else {
return;
}
};
self.inner.with(|this| {
#[cfg(feature = "internal-heap-stats")]
@ -647,8 +657,8 @@ unsafe impl GlobalAlloc for EspHeap {
let mut iter = this.heap.iter_mut();
while let Some(Some(region)) = iter.next() {
if region.heap.bottom() <= ptr && region.heap.top() >= ptr {
unsafe { region.heap.deallocate(NonNull::new_unchecked(ptr), layout) };
if unsafe { region.try_deallocate(ptr, layout) } {
break;
}
}

View File

@ -33,7 +33,8 @@ fn main() -> ! {
let config = esp_hal::Config::default().with_cpu_clock(CpuClock::max());
let peripherals = esp_hal::init(config);
esp_alloc::heap_allocator!(size: 72 * 1024);
esp_alloc::heap_allocator!(#[unsafe(link_section = ".dram2_uninit")] size: 64 * 1024);
esp_alloc::heap_allocator!(size: 36 * 1024);
let delay = Delay::new();

View File

@ -40,7 +40,8 @@ fn main() -> ! {
let config = esp_hal::Config::default().with_cpu_clock(CpuClock::max());
let peripherals = esp_hal::init(config);
esp_alloc::heap_allocator!(size: 72 * 1024);
esp_alloc::heap_allocator!(#[unsafe(link_section = ".dram2_uninit")] size: 64 * 1024);
esp_alloc::heap_allocator!(size: 36 * 1024);
let timg0 = TimerGroup::new(peripherals.TIMG0);
esp_preempt::start(timg0.timer0);

View File

@ -44,7 +44,8 @@ fn main() -> ! {
let config = esp_hal::Config::default().with_cpu_clock(CpuClock::max());
let peripherals = esp_hal::init(config);
esp_alloc::heap_allocator!(size: 72 * 1024);
esp_alloc::heap_allocator!(#[unsafe(link_section = ".dram2_uninit")] size: 64 * 1024);
esp_alloc::heap_allocator!(size: 36 * 1024);
let timg0 = TimerGroup::new(peripherals.TIMG0);
esp_preempt::start(timg0.timer0);

View File

@ -41,7 +41,8 @@ fn main() -> ! {
let config = esp_hal::Config::default().with_cpu_clock(CpuClock::max());
let peripherals = esp_hal::init(config);
esp_alloc::heap_allocator!(size: 72 * 1024);
esp_alloc::heap_allocator!(#[unsafe(link_section = ".dram2_uninit")] size: 64 * 1024);
esp_alloc::heap_allocator!(size: 36 * 1024);
let timg0 = TimerGroup::new(peripherals.TIMG0);
esp_preempt::start(timg0.timer0);

View File

@ -55,7 +55,8 @@ async fn main(spawner: Spawner) -> ! {
let config = esp_hal::Config::default().with_cpu_clock(CpuClock::max());
let peripherals = esp_hal::init(config);
esp_alloc::heap_allocator!(size: 72 * 1024);
esp_alloc::heap_allocator!(#[unsafe(link_section = ".dram2_uninit")] size: 64 * 1024);
esp_alloc::heap_allocator!(size: 36 * 1024);
let timg0 = TimerGroup::new(peripherals.TIMG0);
esp_preempt::start(timg0.timer0);

View File

@ -70,7 +70,8 @@ async fn main(spawner: Spawner) -> ! {
let config = esp_hal::Config::default().with_cpu_clock(CpuClock::max());
let peripherals = esp_hal::init(config);
esp_alloc::heap_allocator!(size: 72 * 1024);
esp_alloc::heap_allocator!(#[unsafe(link_section = ".dram2_uninit")] size: 64 * 1024);
esp_alloc::heap_allocator!(size: 36 * 1024);
let timg0 = TimerGroup::new(peripherals.TIMG0);
esp_preempt::start(timg0.timer0);

View File

@ -46,7 +46,8 @@ async fn main(spawner: Spawner) -> ! {
let config = esp_hal::Config::default().with_cpu_clock(CpuClock::max());
let peripherals = esp_hal::init(config);
esp_alloc::heap_allocator!(size: 72 * 1024);
esp_alloc::heap_allocator!(#[unsafe(link_section = ".dram2_uninit")] size: 64 * 1024);
esp_alloc::heap_allocator!(size: 36 * 1024);
let timg0 = TimerGroup::new(peripherals.TIMG0);
esp_preempt::start(timg0.timer0);

View File

@ -326,6 +326,7 @@ fn run_cipher_tests(buffer: &mut [u8]) {
);
}
#[cfg(aes_dma)]
fn run_unaligned_dma_tests<const MAX_SHIFT: usize>(memory: &mut [u8]) {
let zeros = [0; MAX_SHIFT];

View File

@ -1,9 +1,12 @@
//! Allocator and PSRAM-related tests
//% CHIPS(quad): esp32 esp32s2
//% CHIPS:
//% CHIPS(llff_quad, tlsf_quad): esp32 esp32s2
// The S3 dev kit in the HIL-tester has octal PSRAM.
//% CHIPS(octal): esp32s3
//% ENV(octal): ESP_HAL_CONFIG_PSRAM_MODE=octal
//% CHIPS(llff_octal, tlsf_octal): esp32s3
//% ENV(llff_octal, tlsf_octal): ESP_HAL_CONFIG_PSRAM_MODE=octal
//% ENV(llff_octal, llff_quad): ESP_ALLOC_CONFIG_HEAP_ALGORITHM=LLFF
//% ENV(tlsf_octal, tlsf_quad): ESP_ALLOC_CONFIG_HEAP_ALGORITHM=TLSF
//% FEATURES: unstable psram esp-storage esp-alloc/nightly
#![no_std]
@ -50,16 +53,18 @@ mod tests {
#[test]
fn all_psram_is_usable() {
let free = esp_alloc::HEAP.free();
defmt::info!("Free: {}", free);
let mut vec = AllocVec::with_capacity(free);
if option_env!("ESP_ALLOC_CONFIG_HEAP_ALGORITHM") == Some("LLFF") {
let free = esp_alloc::HEAP.free();
defmt::info!("Free: {}", free);
let mut vec = AllocVec::with_capacity(free);
for i in 0..free {
vec.push((i % 256) as u8);
}
for i in 0..free {
vec.push((i % 256) as u8);
}
for i in 0..free {
assert_eq!(vec[i], (i % 256) as u8);
for i in 0..free {
assert_eq!(vec[i], (i % 256) as u8);
}
}
}
@ -109,31 +114,35 @@ mod tests {
#[test]
fn all_psram_is_usable_with_external_mem_allocator() {
let free = esp_alloc::HEAP.free();
defmt::info!("Free: {}", free);
let mut vec = Vec::with_capacity_in(free, ExternalMemory);
if option_env!("ESP_ALLOC_CONFIG_HEAP_ALGORITHM") == Some("LLFF") {
let free = esp_alloc::HEAP.free();
defmt::info!("Free: {}", free);
let mut vec = Vec::with_capacity_in(free, ExternalMemory);
for i in 0..free {
vec.push((i % 256) as u8);
}
for i in 0..free {
vec.push((i % 256) as u8);
}
for i in 0..free {
assert_eq!(vec[i], (i % 256) as u8);
for i in 0..free {
assert_eq!(vec[i], (i % 256) as u8);
}
}
}
#[test]
fn all_psram_is_usable_with_any_mem_allocator() {
let free = esp_alloc::HEAP.free();
defmt::info!("Free: {}", free);
let mut vec = Vec::with_capacity_in(free, AnyMemory);
if option_env!("ESP_ALLOC_CONFIG_HEAP_ALGORITHM") == Some("LLFF") {
let free = esp_alloc::HEAP.free();
defmt::info!("Free: {}", free);
let mut vec = Vec::with_capacity_in(free, AnyMemory);
for i in 0..free {
vec.push((i % 256) as u8);
}
for i in 0..free {
vec.push((i % 256) as u8);
}
for i in 0..free {
assert_eq!(vec[i], (i % 256) as u8);
for i in 0..free {
assert_eq!(vec[i], (i % 256) as u8);
}
}
}