Introduce esp-sync, avoid critical_section like the plague (#4023)

* Introduce esp-sync

* Avoid critical_section as much as possible
This commit is contained in:
Dániel Buga 2025-09-03 11:34:18 +02:00 committed by GitHub
parent dcdf0ba61f
commit 99e2b936df
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
73 changed files with 1939 additions and 1470 deletions

View File

@ -46,6 +46,8 @@ jobs:
- 'esp-lp-hal/**'
esp-metadata:
- 'esp-metadata/**'
esp-preempt:
- 'esp-preempt/**'
esp-println:
- 'esp-println/**'
esp-riscv-rt:
@ -56,6 +58,8 @@ jobs:
- 'esp-storage/**'
esp-radio:
- 'esp-radio/**'
esp-sync:
- 'esp-sync/**'
xtensa-lx:
- 'xtensa-lx/**'
xtensa-lx-rt:
@ -133,6 +137,14 @@ jobs:
skipLabels: "skip-changelog"
missingUpdateErrorMessage: "Please add a changelog entry in the esp-lp-hal/CHANGELOG.md file."
- name: Check that changelog updated (esp-preempt)
if: steps.changes.outputs.esp-preempt == 'true'
uses: dangoslen/changelog-enforcer@v3
with:
changeLogPath: esp-preempt/CHANGELOG.md
skipLabels: "skip-changelog"
missingUpdateErrorMessage: "Please add a changelog entry in the esp-preempt/CHANGELOG.md file."
- name: Check that changelog updated (esp-println)
if: steps.changes.outputs.esp-println == 'true'
uses: dangoslen/changelog-enforcer@v3
@ -165,6 +177,14 @@ jobs:
skipLabels: "skip-changelog"
missingUpdateErrorMessage: "Please add a changelog entry in the esp-radio/CHANGELOG.md file."
- name: Check that changelog updated (esp-sync)
if: steps.changes.outputs.esp-sync == 'true'
uses: dangoslen/changelog-enforcer@v3
with:
changeLogPath: esp-sync/CHANGELOG.md
skipLabels: "skip-changelog"
missingUpdateErrorMessage: "Please add a changelog entry in the esp-sync/CHANGELOG.md file."
- name: Check that changelog updated (xtensa-lx)
if: steps.changes.outputs.xtensa-lx == 'true'
uses: dangoslen/changelog-enforcer@v3

View File

@ -19,6 +19,7 @@ exclude = [
"esp-riscv-rt",
"esp-radio",
"esp-storage",
"esp-sync",
"examples",
"extras/bench-server",
"extras/esp-wifishark",

View File

@ -9,9 +9,11 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
### Added
- Added chip-selection features (#4023)
- New default feature (`compat`) enables implementations for `malloc`, `free`, `calloc`, `realloc` (#3890)
### Changed
- Make stats structs fields public (#3828)
### Fixed

View File

@ -22,8 +22,8 @@ test = false
allocator-api2 = { version = "0.3.0", default-features = false }
defmt = { version = "1.0.1", optional = true }
cfg-if = "1.0.0"
critical-section = "1.2.0"
enumset = "1.1.6"
esp-sync = { version = "0.0.0", path = "../esp-sync" }
linked_list_allocator = { version = "0.10.5", default-features = false, features = ["const_mut_refs"] }
document-features = "0.2.11"
@ -46,3 +46,22 @@ internal-heap-stats = []
## Provide C-compatibility functions (malloc, free, ...)
compat = []
#! ### Chip selection
#! One of the following features must be enabled to select the target chip:
##
esp32c2 = ["esp-sync/esp32c2"]
##
esp32c3 = ["esp-sync/esp32c3"]
##
esp32c6 = ["esp-sync/esp32c6"]
##
esp32h2 = ["esp-sync/esp32h2"]
##
esp32 = ["esp-sync/esp32"]
##
esp32s2 = ["esp-sync/esp32s2"]
##
esp32s3 = ["esp-sync/esp32s3"]

View File

@ -149,22 +149,19 @@ mod malloc;
use core::{
alloc::{GlobalAlloc, Layout},
cell::RefCell,
fmt::Display,
ptr::{self, NonNull},
};
pub use allocators::*;
use critical_section::Mutex;
use enumset::{EnumSet, EnumSetType};
use esp_sync::NonReentrantMutex;
use linked_list_allocator::Heap;
/// The global allocator instance
#[global_allocator]
pub static HEAP: EspHeap = EspHeap::empty();
const NON_REGION: Option<HeapRegion> = None;
const BAR_WIDTH: usize = 35;
fn write_bar(f: &mut core::fmt::Formatter<'_>, usage_percent: usize) -> core::fmt::Result {
@ -376,27 +373,190 @@ struct InternalHeapStats {
total_freed: usize,
}
struct EspHeapInner {
heap: [Option<HeapRegion>; 3],
#[cfg(feature = "internal-heap-stats")]
internal_heap_stats: InternalHeapStats,
}
impl EspHeapInner {
    /// Create a new UNINITIALIZED heap allocator
pub const fn empty() -> Self {
EspHeapInner {
heap: [const { None }; 3],
#[cfg(feature = "internal-heap-stats")]
internal_heap_stats: InternalHeapStats {
max_usage: 0,
total_allocated: 0,
total_freed: 0,
},
}
}
pub unsafe fn add_region(&mut self, region: HeapRegion) {
let free = self
.heap
.iter()
.enumerate()
.find(|v| v.1.is_none())
.map(|v| v.0);
if let Some(free) = free {
self.heap[free] = Some(region);
} else {
panic!(
"Exceeded the maximum of {} heap memory regions",
self.heap.len()
);
}
}
/// Returns an estimate of the amount of bytes in use in all memory regions.
pub fn used(&self) -> usize {
let mut used = 0;
for region in self.heap.iter() {
if let Some(region) = region.as_ref() {
used += region.heap.used();
}
}
used
}
/// Return usage stats for the [Heap].
///
/// Note:
/// [HeapStats] directly implements [Display], so this function can be
/// called from within `println!()` to pretty-print the usage of the
/// heap.
pub fn stats(&self) -> HeapStats {
let mut region_stats: [Option<RegionStats>; 3] = [const { None }; 3];
let mut used = 0;
let mut free = 0;
for (id, region) in self.heap.iter().enumerate() {
if let Some(region) = region.as_ref() {
let stats = region.stats();
free += stats.free;
used += stats.used;
region_stats[id] = Some(region.stats());
}
}
cfg_if::cfg_if! {
if #[cfg(feature = "internal-heap-stats")] {
HeapStats {
region_stats,
size: free + used,
current_usage: used,
max_usage: self.internal_heap_stats.max_usage,
total_allocated: self.internal_heap_stats.total_allocated,
total_freed: self.internal_heap_stats.total_freed,
}
} else {
HeapStats {
region_stats,
size: free + used,
current_usage: used,
}
}
}
}
/// Returns an estimate of the amount of bytes available.
pub fn free(&self) -> usize {
self.free_caps(EnumSet::empty())
}
/// The free heap satisfying the given requirements
pub fn free_caps(&self, capabilities: EnumSet<MemoryCapability>) -> usize {
let mut free = 0;
for region in self.heap.iter().filter(|region| {
if region.is_some() {
region
.as_ref()
.unwrap()
.capabilities
.is_superset(capabilities)
} else {
false
}
}) {
if let Some(region) = region.as_ref() {
free += region.heap.free();
}
}
free
}
/// Allocate memory in a region satisfying the given requirements.
///
/// # Safety
///
/// This function is unsafe because undefined behavior can result
/// if the caller does not ensure that `layout` has non-zero size.
///
/// The allocated block of memory may or may not be initialized.
unsafe fn alloc_caps(
&mut self,
capabilities: EnumSet<MemoryCapability>,
layout: Layout,
) -> *mut u8 {
#[cfg(feature = "internal-heap-stats")]
let before = self.used();
let mut iter = self.heap.iter_mut().filter(|region| {
if region.is_some() {
region
.as_ref()
.unwrap()
.capabilities
.is_superset(capabilities)
} else {
false
}
});
let res = loop {
if let Some(Some(region)) = iter.next() {
let res = region.heap.allocate_first_fit(layout);
if let Ok(res) = res {
break Some(res);
}
} else {
break None;
}
};
res.map_or(ptr::null_mut(), |allocation| {
#[cfg(feature = "internal-heap-stats")]
{
// We need to call used because [linked_list_allocator::Heap] does internal size
// alignment so we cannot use the size provided by the layout.
let used = self.used();
self.internal_heap_stats.total_allocated += used - before;
self.internal_heap_stats.max_usage =
core::cmp::max(self.internal_heap_stats.max_usage, used);
}
allocation.as_ptr()
})
}
}
/// A memory allocator
///
/// In addition to what Rust's memory allocator can do it allows to allocate
/// memory in regions satisfying specific needs.
pub struct EspHeap {
heap: Mutex<RefCell<[Option<HeapRegion>; 3]>>,
#[cfg(feature = "internal-heap-stats")]
internal_heap_stats: Mutex<RefCell<InternalHeapStats>>,
inner: NonReentrantMutex<EspHeapInner>,
}
impl EspHeap {
    /// Create a new UNINITIALIZED heap allocator
pub const fn empty() -> Self {
EspHeap {
heap: Mutex::new(RefCell::new([NON_REGION; 3])),
#[cfg(feature = "internal-heap-stats")]
internal_heap_stats: Mutex::new(RefCell::new(InternalHeapStats {
max_usage: 0,
total_allocated: 0,
total_freed: 0,
})),
inner: NonReentrantMutex::new(EspHeapInner::empty()),
}
}
@ -425,37 +585,12 @@ impl EspHeap {
/// - The supplied memory region must be exclusively available to the heap only, no aliasing.
/// - `size > 0`.
pub unsafe fn add_region(&self, region: HeapRegion) {
critical_section::with(|cs| {
let mut regions = self.heap.borrow_ref_mut(cs);
let free = regions
.iter()
.enumerate()
.find(|v| v.1.is_none())
.map(|v| v.0);
if let Some(free) = free {
regions[free] = Some(region);
} else {
panic!(
"Exceeded the maximum of {} heap memory regions",
regions.len()
);
}
});
self.inner.with(|heap| unsafe { heap.add_region(region) })
}
/// Returns an estimate of the amount of bytes in use in all memory regions.
pub fn used(&self) -> usize {
critical_section::with(|cs| {
let regions = self.heap.borrow_ref(cs);
let mut used = 0;
for region in regions.iter() {
if let Some(region) = region.as_ref() {
used += region.heap.used();
}
}
used
})
self.inner.with(|heap| heap.used())
}
/// Return usage stats for the [Heap].
@ -465,71 +600,17 @@ impl EspHeap {
/// called from within `println!()` to pretty-print the usage of the
/// heap.
pub fn stats(&self) -> HeapStats {
const EMPTY_REGION_STAT: Option<RegionStats> = None;
let mut region_stats: [Option<RegionStats>; 3] = [EMPTY_REGION_STAT; 3];
critical_section::with(|cs| {
let mut used = 0;
let mut free = 0;
let regions = self.heap.borrow_ref(cs);
for (id, region) in regions.iter().enumerate() {
if let Some(region) = region.as_ref() {
let stats = region.stats();
free += stats.free;
used += stats.used;
region_stats[id] = Some(region.stats());
}
}
cfg_if::cfg_if! {
if #[cfg(feature = "internal-heap-stats")] {
let internal_heap_stats = self.internal_heap_stats.borrow_ref(cs);
HeapStats {
region_stats,
size: free + used,
current_usage: used,
max_usage: internal_heap_stats.max_usage,
total_allocated: internal_heap_stats.total_allocated,
total_freed: internal_heap_stats.total_freed,
}
} else {
HeapStats {
region_stats,
size: free + used,
current_usage: used,
}
}
}
})
self.inner.with(|heap| heap.stats())
}
/// Returns an estimate of the amount of bytes available.
pub fn free(&self) -> usize {
self.free_caps(EnumSet::empty())
self.inner.with(|heap| heap.free())
}
/// The free heap satisfying the given requirements
pub fn free_caps(&self, capabilities: EnumSet<MemoryCapability>) -> usize {
critical_section::with(|cs| {
let regions = self.heap.borrow_ref(cs);
let mut free = 0;
for region in regions.iter().filter(|region| {
if region.is_some() {
region
.as_ref()
.unwrap()
.capabilities
.is_superset(capabilities)
} else {
false
}
}) {
if let Some(region) = region.as_ref() {
free += region.heap.free();
}
}
free
})
self.inner.with(|heap| heap.free_caps(capabilities))
}
/// Allocate memory in a region satisfying the given requirements.
@ -545,50 +626,8 @@ impl EspHeap {
capabilities: EnumSet<MemoryCapability>,
layout: Layout,
) -> *mut u8 {
critical_section::with(|cs| {
#[cfg(feature = "internal-heap-stats")]
let before = self.used();
let mut regions = self.heap.borrow_ref_mut(cs);
let mut iter = (*regions).iter_mut().filter(|region| {
if region.is_some() {
region
.as_ref()
.unwrap()
.capabilities
.is_superset(capabilities)
} else {
false
}
});
let res = loop {
if let Some(Some(region)) = iter.next() {
let res = region.heap.allocate_first_fit(layout);
if let Ok(res) = res {
break Some(res);
}
} else {
break None;
}
};
res.map_or(ptr::null_mut(), |allocation| {
#[cfg(feature = "internal-heap-stats")]
{
let mut internal_heap_stats = self.internal_heap_stats.borrow_ref_mut(cs);
drop(regions);
// We need to call used because [linked_list_allocator::Heap] does internal size
// alignment so we cannot use the size provided by the layout.
let used = self.used();
internal_heap_stats.total_allocated += used - before;
internal_heap_stats.max_usage =
core::cmp::max(internal_heap_stats.max_usage, used);
}
allocation.as_ptr()
})
})
self.inner
.with(|heap| unsafe { heap.alloc_caps(capabilities, layout) })
}
}
@ -598,33 +637,28 @@ unsafe impl GlobalAlloc for EspHeap {
}
unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
unsafe {
if ptr.is_null() {
return;
if ptr.is_null() {
return;
}
self.inner.with(|this| {
#[cfg(feature = "internal-heap-stats")]
let before = this.used();
let mut iter = this.heap.iter_mut();
while let Some(Some(region)) = iter.next() {
if region.heap.bottom() <= ptr && region.heap.top() >= ptr {
unsafe { region.heap.deallocate(NonNull::new_unchecked(ptr), layout) };
}
}
critical_section::with(|cs| {
#[cfg(feature = "internal-heap-stats")]
let before = self.used();
let mut regions = self.heap.borrow_ref_mut(cs);
let mut iter = (*regions).iter_mut();
while let Some(Some(region)) = iter.next() {
if region.heap.bottom() <= ptr && region.heap.top() >= ptr {
region.heap.deallocate(NonNull::new_unchecked(ptr), layout);
}
}
#[cfg(feature = "internal-heap-stats")]
{
let mut internal_heap_stats = self.internal_heap_stats.borrow_ref_mut(cs);
drop(regions);
// We need to call `used()` because [linked_list_allocator::Heap] does internal
// size alignment so we cannot use the size provided by the
// layout.
internal_heap_stats.total_freed += before - self.used();
}
})
}
#[cfg(feature = "internal-heap-stats")]
{
// We need to call `used()` because [linked_list_allocator::Heap] does internal
// size alignment so we cannot use the size provided by the
// layout.
this.internal_heap_stats.total_freed += before - this.used();
}
})
}
}

View File

@ -20,7 +20,6 @@ test = false
[dependencies]
cfg-if = "1.0.0"
critical-section = "1.1.2"
defmt = { version = "1", optional = true }
esp-config = { version = "0.5.0", path = "../esp-config" }
esp-metadata-generated = { version = "0.1.0", path = "../esp-metadata-generated" }
@ -29,6 +28,12 @@ heapless = "0.8"
semihosting = { version = "0.1.20", optional = true }
document-features = "0.2.11"
[target.'cfg(target_arch = "riscv32")'.dependencies]
riscv = { version = "0.14.0" }
[target.'cfg(target_arch = "xtensa")'.dependencies]
xtensa-lx = { version = "0.12.0", path = "../xtensa-lx" }
[build-dependencies]
esp-config = { version = "0.5.0", path = "../esp-config", features = ["build"] }

View File

@ -201,5 +201,5 @@ fn abort() -> ! {
}
#[allow(unreachable_code)]
critical_section::with(|_| loop {})
arch::interrupt_free(|| loop {})
}

View File

@ -1,5 +1,8 @@
use core::arch::asm;
#[cfg(feature = "panic-handler")]
pub(crate) use riscv::interrupt::free as interrupt_free;
use crate::{Backtrace, BacktraceFrame};
// subtract 4 from the return address

View File

@ -1,5 +1,8 @@
use core::arch::asm;
#[cfg(feature = "panic-handler")]
pub(crate) use xtensa_lx::interrupt::free as interrupt_free;
use crate::{Backtrace, BacktraceFrame};
// subtract 3 from the return address

View File

@ -20,17 +20,17 @@ test = false
[dependencies]
cfg-if = "1.0.0"
critical-section = "1.2.0"
esp-hal = { version = "1.0.0-rc.0", path = "../esp-hal", default-features = false, features = ["requires-unstable"] }
portable-atomic = "1.11.0"
static_cell = "2.1.0"
# Unstable dependencies that are not (strictly) part of the public API
document-features = "0.2.11"
embassy-sync = { version = "0.6.2" }
embassy-sync = { version = "0.7" }
embassy-time-driver = { version = "0.2.0", features = [ "tick-hz-1_000_000" ] }
embassy-time-queue-utils = { version = "0.1.0", features = ["_generic-queue"] }
esp-config = { version = "0.5.0", path = "../esp-config" }
esp-sync = { version = "0.0.0", path = "../esp-sync" }
macros = { version = "0.19.0", features = ["embassy"], package = "esp-hal-procmacros", path = "../esp-hal-procmacros" }
# Optional dependencies that enable ecosystem support.
@ -40,6 +40,9 @@ embassy-executor = { version = "0.7.0", features = ["timer-item-payload
defmt = { version = "1.0.1", optional = true }
log-04 = { package = "log", version = "0.4.27", optional = true }
[target.'cfg(target_arch = "riscv32")'.dependencies]
riscv = { version = "0.14.0" }
[build-dependencies]
esp-config = { version = "0.5.0", path = "../esp-config", features = ["build"] }
esp-metadata-generated = { version = "0.1.0", path = "../esp-metadata-generated", features = ["build-script"] }
@ -47,13 +50,13 @@ esp-metadata-generated = { version = "0.1.0", path = "../esp-metadata-generated"
[features]
default = ["executors"]
esp32 = ["esp-hal/esp32"]
esp32c2 = ["esp-hal/esp32c2"]
esp32c3 = ["esp-hal/esp32c3"]
esp32c6 = ["esp-hal/esp32c6"]
esp32h2 = ["esp-hal/esp32h2"]
esp32s2 = ["esp-hal/esp32s2"]
esp32s3 = ["esp-hal/esp32s3"]
esp32 = ["esp-hal/esp32", "esp-sync/esp32"]
esp32c2 = ["esp-hal/esp32c2", "esp-sync/esp32c2"]
esp32c3 = ["esp-hal/esp32c3", "esp-sync/esp32c3"]
esp32c6 = ["esp-hal/esp32c6", "esp-sync/esp32c6"]
esp32h2 = ["esp-hal/esp32h2", "esp-sync/esp32h2"]
esp32s2 = ["esp-hal/esp32s2", "esp-sync/esp32s2"]
esp32s3 = ["esp-hal/esp32s3", "esp-sync/esp32s3"]
## Enable the `Executor` and `InterruptExecutor` embassy executor implementations.
executors = ["dep:embassy-executor", "esp-hal/__esp_hal_embassy"]

View File

@ -224,7 +224,7 @@ This will use software-interrupt 3 which isn't available for anything else to wa
fn wait_impl(cpu: usize) {
// we do not care about race conditions between the load and store operations,
// interrupts will only set this value to true.
critical_section::with(|_| {
riscv::interrupt::free(|| {
// if there is work to do, loop back to polling
if !SIGNAL_WORK_THREAD_MODE[cpu].load(Ordering::Relaxed) {
// if not, wait for interrupt

View File

@ -10,10 +10,10 @@ use embassy_time_driver::Driver;
use esp_hal::{
Blocking,
interrupt::{InterruptHandler, Priority},
sync::Locked,
time::{Duration, Instant},
timer::{Error, OneShotTimer},
};
use esp_sync::NonReentrantMutex;
pub type Timer = OneShotTimer<'static, Blocking>;
@ -67,19 +67,19 @@ struct AlarmInner {
pub state: AlarmState,
}
unsafe impl Send for AlarmInner {}
struct Alarm {
// FIXME: we should be able to use priority-limited locks here, but we can initialize alarms
// while running at an arbitrary priority level. We need to rework alarm allocation to only use
// a critical section to allocate an alarm, but not when using it.
pub inner: Locked<AlarmInner>,
pub inner: NonReentrantMutex<AlarmInner>,
}
unsafe impl Send for Alarm {}
impl Alarm {
pub const fn new(handler: extern "C" fn()) -> Self {
Self {
inner: Locked::new(AlarmInner {
inner: NonReentrantMutex::new(AlarmInner {
#[cfg(not(single_queue))]
context: Cell::new(core::ptr::null_mut()),
state: AlarmState::Created(handler),
@ -110,9 +110,11 @@ pub(super) struct EmbassyTimer {
pub(crate) inner: crate::timer_queue::TimerQueue,
alarms: [Alarm; MAX_SUPPORTED_ALARM_COUNT],
available_timers: Locked<Option<&'static mut [Timer]>>,
available_timers: NonReentrantMutex<Option<&'static mut [Timer]>>,
}
unsafe impl Send for EmbassyTimer {}
/// Repeats the `Alarm::new` constructor for each alarm, creating an interrupt
/// handler for each of them.
macro_rules! alarms {
@ -139,7 +141,7 @@ embassy_time_driver::time_driver_impl!(static DRIVER: EmbassyTimer = EmbassyTime
#[cfg(single_queue)]
inner: crate::timer_queue::TimerQueue::new(Priority::max()),
alarms: alarms!(0, 1, 2, 3, 4, 5, 6),
available_timers: Locked::new(None),
available_timers: NonReentrantMutex::new(None),
});
impl EmbassyTimer {

View File

@ -37,14 +37,15 @@ esp-rom-sys = { version = "0.1.1", path = "../esp-rom-sys" }
bitfield = "0.19.0"
delegate = "0.13.3"
document-features = "0.2.11"
embassy-futures = "0.1.1"
embassy-sync = "0.6.2"
embassy-futures = "0.1"
embassy-sync = "0.7"
fugit = "0.3.7"
instability = "0.3.9"
strum = { version = "0.27.1", default-features = false, features = ["derive"] }
esp-config = { version = "0.5.0", path = "../esp-config" }
esp-metadata-generated = { version = "0.1.0", path = "../esp-metadata-generated" }
esp-sync = { version = "0.0.0", path = "../esp-sync" }
procmacros = { version = "0.19.0", package = "esp-hal-procmacros", path = "../esp-hal-procmacros" }
# Dependencies that are optional because they are used by unstable drivers.
@ -127,6 +128,7 @@ esp32 = [
"dep:esp32",
"procmacros/rtc-slow",
"esp-rom-sys/esp32",
"esp-sync/esp32",
"esp-metadata-generated/esp32",
"dep:sha1",
"dep:sha2"
@ -137,6 +139,7 @@ esp32c2 = [
"dep:riscv",
"portable-atomic/unsafe-assume-single-core",
"esp-rom-sys/esp32c2",
"esp-sync/esp32c2",
"esp-metadata-generated/esp32c2",
]
# Target the ESP32-C3.
@ -146,6 +149,7 @@ esp32c3 = [
"esp-riscv-rt/rtc-ram",
"portable-atomic/unsafe-assume-single-core",
"esp-rom-sys/esp32c3",
"esp-sync/esp32c3",
"esp-metadata-generated/esp32c3",
]
# Target the ESP32-C6.
@ -156,6 +160,7 @@ esp32c6 = [
"esp-riscv-rt/has-mie-mip",
"procmacros/has-lp-core",
"esp-rom-sys/esp32c6",
"esp-sync/esp32c6",
"esp-metadata-generated/esp32c6",
]
# Target the ESP32-H2.
@ -165,6 +170,7 @@ esp32h2 = [
"esp-riscv-rt/rtc-ram",
"esp-riscv-rt/has-mie-mip",
"esp-rom-sys/esp32h2",
"esp-sync/esp32h2",
"esp-metadata-generated/esp32h2",
]
# Target the ESP32-S2.
@ -175,6 +181,7 @@ esp32s2 = [
"procmacros/rtc-slow",
"__usb_otg",
"esp-rom-sys/esp32s2",
"esp-sync/esp32s2",
"esp-metadata-generated/esp32s2",
]
# Target the ESP32-S3.
@ -184,6 +191,7 @@ esp32s3 = [
"procmacros/rtc-slow",
"__usb_otg",
"esp-rom-sys/esp32s3",
"esp-sync/esp32s3",
"esp-metadata-generated/esp32s3",
]
@ -230,6 +238,7 @@ defmt = [
"fugit/defmt",
"esp-riscv-rt?/defmt",
"xtensa-lx-rt?/defmt",
"esp-sync/defmt"
]
#! ### PSRAM Feature Flags

View File

@ -2,8 +2,7 @@
use core::task::Waker;
use embassy_sync::waitqueue::GenericAtomicWaker;
use crate::sync::RawMutex;
use esp_sync::RawMutex;
/// Utility struct to register and wake a waker.
pub struct AtomicWaker {

View File

@ -16,7 +16,7 @@ pub(crate) fn esp32c6_rtc_bbpll_configure_raw(_xtal_freq: u32, pll_freq: u32) {
// Do nothing
debug_assert!(pll_freq == 480);
critical_section::with(|_| {
crate::ESP_HAL_LOCK.lock(|| {
// enable i2c mst clk by force on (temporarily)
let was_i2c_mst_en = MODEM_LPCON::regs().clk_conf().read().clk_i2c_mst_en().bit();
MODEM_LPCON::regs()

View File

@ -47,13 +47,16 @@
use core::{cell::Cell, marker::PhantomData};
#[cfg(any(bt, ieee802154, wifi))]
use esp_sync::RawMutex;
#[cfg(bt)]
use crate::peripherals::BT;
#[cfg(all(feature = "unstable", ieee802154))]
use crate::peripherals::IEEE802154;
#[cfg(wifi)]
use crate::peripherals::WIFI;
use crate::{private::Sealed, rtc_cntl::RtcClock, time::Rate};
use crate::{ESP_HAL_LOCK, private::Sealed, rtc_cntl::RtcClock, time::Rate};
#[cfg_attr(esp32, path = "clocks_ll/esp32.rs")]
#[cfg_attr(esp32c2, path = "clocks_ll/esp32c2.rs")]
@ -330,7 +333,7 @@ static mut ACTIVE_CLOCKS: Option<Clocks> = None;
impl Clocks {
pub(crate) fn init(cpu_clock_speed: CpuClock) {
critical_section::with(|_| {
ESP_HAL_LOCK.lock(|| {
crate::rtc_cntl::rtc::init();
let config = Self::configure(cpu_clock_speed);
@ -610,13 +613,12 @@ impl Clocks {
#[cfg(any(bt, ieee802154, wifi))]
/// Tracks the number of references to the PHY clock.
static PHY_CLOCK_REF_COUNTER: critical_section::Mutex<Cell<u8>> =
critical_section::Mutex::new(Cell::new(0));
static PHY_CLOCK_REF_COUNTER: embassy_sync::blocking_mutex::Mutex<RawMutex, Cell<u8>> =
embassy_sync::blocking_mutex::Mutex::new(Cell::new(0));
#[cfg(any(bt, ieee802154, wifi))]
fn increase_phy_clock_ref_count_internal() {
critical_section::with(|cs| {
let phy_clock_ref_counter = PHY_CLOCK_REF_COUNTER.borrow(cs);
PHY_CLOCK_REF_COUNTER.lock(|phy_clock_ref_counter| {
let phy_clock_ref_count = phy_clock_ref_counter.get();
if phy_clock_ref_count == 0 {
@ -624,14 +626,12 @@ fn increase_phy_clock_ref_count_internal() {
}
phy_clock_ref_counter.set(phy_clock_ref_count + 1);
});
})
}
#[cfg(any(bt, ieee802154, wifi))]
fn decrease_phy_clock_ref_count_internal() {
critical_section::with(|cs| {
let phy_clock_ref_counter = PHY_CLOCK_REF_COUNTER.borrow(cs);
PHY_CLOCK_REF_COUNTER.lock(|phy_clock_ref_counter| {
let new_phy_clock_ref_count = unwrap!(
phy_clock_ref_counter.get().checked_sub(1),
"PHY clock ref count underflowed. Either you forgot a PhyClockGuard, or used ModemClockController::decrease_phy_clock_ref_count incorrectly."
@ -641,7 +641,7 @@ fn decrease_phy_clock_ref_count_internal() {
clocks_ll::enable_phy(false);
}
phy_clock_ref_counter.set(new_phy_clock_ref_count);
});
})
}
#[inline]

View File

@ -16,8 +16,6 @@
use core::marker::PhantomData;
use critical_section::CriticalSection;
use crate::{
dma::*,
handler,
@ -791,7 +789,7 @@ crate::dma::impl_dma_eligible! {
}
}
pub(super) fn init_dma(_cs: CriticalSection<'_>) {
pub(super) fn init_dma_racey() {
DMA::regs()
.misc_conf()
.modify(|_, w| w.ahbm_rst_inter().set_bit());

View File

@ -1741,7 +1741,7 @@ cfg_if::cfg_if! {
fn create_guard(_ch: &impl RegisterAccess) -> PeripheralGuard {
// NOTE(p4): this function will read the channel's DMA peripheral from `_ch`
system::GenericPeripheralGuard::new_with(init_dma)
system::GenericPeripheralGuard::new_with(init_dma_racey)
}
// DMA receive channel

View File

@ -11,7 +11,6 @@
//! [SPI]: ../spi/index.html
//! [I2S]: ../i2s/index.html
use critical_section::CriticalSection;
use portable_atomic::AtomicBool;
use crate::{
@ -180,7 +179,7 @@ crate::dma::impl_dma_eligible!([DMA_CRYPTO] AES => Aes);
#[cfg(esp32s2)]
crate::dma::impl_dma_eligible!([DMA_CRYPTO] SHA => Sha);
pub(super) fn init_dma(_cs: CriticalSection<'_>) {
pub(super) fn init_dma_racey() {
#[cfg(esp32)]
{
// (only) on ESP32 we need to configure DPORT for the SPI DMA channels

View File

@ -74,6 +74,7 @@ mod placeholder;
use core::fmt::Display;
use esp_sync::RawMutex;
pub use placeholder::NoPin;
use portable_atomic::AtomicU32;
use strum::EnumCount;
@ -83,7 +84,6 @@ use crate::{
interrupt::{InterruptHandler, Priority},
peripherals::{GPIO, IO_MUX, Interrupt},
private::{self, Sealed},
sync::RawMutex,
};
define_io_mux_signals!();

View File

@ -218,6 +218,7 @@ metadata!("build_info", CHIP_NAME, chip!());
#[cfg_attr(docsrs, doc(cfg(all(feature = "unstable", feature = "rt"))))]
#[cfg_attr(not(feature = "unstable"), doc(hidden))]
pub use esp_riscv_rt::{self, riscv};
use esp_sync::RawMutex;
pub(crate) use peripherals::pac;
#[cfg(xtensa)]
#[cfg(all(xtensa, feature = "rt"))]
@ -586,6 +587,9 @@ use crate::config::{WatchdogConfig, WatchdogStatus};
#[cfg(feature = "rt")]
use crate::{clock::Clocks, peripherals::Peripherals};
/// A spinlock for seldom called stuff. Users assume that lock contention is not an issue.
pub(crate) static ESP_HAL_LOCK: RawMutex = RawMutex::new();
/// System configuration.
///
/// This `struct` is marked with `#[non_exhaustive]` and can't be instantiated

View File

@ -11,7 +11,7 @@
use core::marker::PhantomData;
use critical_section::CriticalSection;
use esp_sync::RawMutex;
use crate::{pcnt::channel::Channel, peripherals::PCNT, system::GenericPeripheralGuard};
@ -82,6 +82,7 @@ pub struct Unit<'d, const NUM: usize> {
pub channel0: Channel<'d, NUM, 0>,
/// The second channel in PCNT unit.
pub channel1: Channel<'d, NUM, 1>,
mutex: RawMutex,
}
impl<const NUM: usize> Unit<'_, NUM> {
@ -91,6 +92,7 @@ impl<const NUM: usize> Unit<'_, NUM> {
counter: Counter::new(),
channel0: Channel::new(),
channel1: Channel::new(),
mutex: RawMutex::new(),
}
}
@ -216,7 +218,7 @@ impl<const NUM: usize> Unit<'_, NUM> {
/// Resets the counter value to zero.
pub fn clear(&self) {
let pcnt = PCNT::regs();
critical_section::with(|_cs| {
self.mutex.lock(|| {
pcnt.ctrl().modify(|_, w| w.cnt_rst_u(NUM as u8).set_bit());
// TODO: does this need a delay? (liebman / Jan 2 2023)
pcnt.ctrl()
@ -227,7 +229,7 @@ impl<const NUM: usize> Unit<'_, NUM> {
/// Pause the counter
pub fn pause(&self) {
let pcnt = PCNT::regs();
critical_section::with(|_cs| {
self.mutex.lock(|| {
pcnt.ctrl()
.modify(|_, w| w.cnt_pause_u(NUM as u8).set_bit());
});
@ -236,7 +238,7 @@ impl<const NUM: usize> Unit<'_, NUM> {
/// Resume the counter
pub fn resume(&self) {
let pcnt = PCNT::regs();
critical_section::with(|_cs| {
self.mutex.lock(|| {
pcnt.ctrl()
.modify(|_, w| w.cnt_pause_u(NUM as u8).clear_bit());
});
@ -265,16 +267,16 @@ impl<const NUM: usize> Unit<'_, NUM> {
/// Enable interrupts for this unit.
pub fn listen(&self) {
let pcnt = PCNT::regs();
critical_section::with(|_cs| {
self.mutex.lock(|| {
pcnt.int_ena()
.modify(|_, w| w.cnt_thr_event_u(NUM as u8).set_bit());
});
}
/// Disable interrupts for this unit.
pub fn unlisten(&self, _cs: CriticalSection<'_>) {
pub fn unlisten(&self) {
let pcnt = PCNT::regs();
critical_section::with(|_cs| {
self.mutex.lock(|| {
pcnt.int_ena()
.modify(|_, w| w.cnt_thr_event_u(NUM as u8).clear_bit());
});
@ -289,7 +291,7 @@ impl<const NUM: usize> Unit<'_, NUM> {
/// Clear the interrupt bit for this unit.
pub fn reset_interrupt(&self) {
let pcnt = PCNT::regs();
critical_section::with(|_cs| {
self.mutex.lock(|| {
pcnt.int_clr()
.write(|w| w.cnt_thr_event_u(NUM as u8).set_bit());
});

View File

@ -195,7 +195,7 @@ for_each_peripheral! {
#[unsafe(no_mangle)]
static mut _ESP_HAL_DEVICE_PERIPHERALS: bool = false;
critical_section::with(|_| unsafe {
crate::ESP_HAL_LOCK.lock(|| unsafe {
if _ESP_HAL_DEVICE_PERIPHERALS {
panic!("init called more than once!")
}

View File

@ -1,4 +1,6 @@
use crate::{clock::Clocks, peripherals::RNG, sync::Locked};
use esp_sync::NonReentrantMutex;
use crate::{clock::Clocks, peripherals::RNG};
// TODO: find a better place for these
#[inline]
@ -49,7 +51,7 @@ fn current_cpu_cycles() -> usize {
}
}
static LAST_READ: Locked<usize> = Locked::new(0);
static LAST_READ: NonReentrantMutex<usize> = NonReentrantMutex::new(0);
fn read_one(wait_cycles: usize) -> u32 {
loop {

View File

@ -1,409 +1,61 @@
//! Under construction: This is public only for tests, please avoid using it
//! directly.
#[cfg(single_core)]
use core::cell::Cell;
use core::cell::UnsafeCell;
use core::sync::atomic::{Ordering, compiler_fence};
#[cfg(riscv)]
use esp_sync::raw::SingleCoreInterruptLock;
use esp_sync::{GenericRawMutex, RestoreState, raw::RawLock};
use crate::interrupt::Priority;
/// Opaque token that can be used to release a lock.
// The interpretation of this value depends on the lock type that created it,
// but bit #31 is reserved for the reentry flag.
//
// Xtensa: PS has 15 useful bits. Bits 12..16 and 19..32 are unused, so we can
// use bit #31 as our reentry flag.
// We can assume the reserved bit is 0 otherwise rsil - wsr pairings would be
// undefined behavior: Quoting the ISA summary, table 64:
// Writing a non-zero value to these fields results in undefined processor
// behavior.
//
// Risc-V: we either get the restore state from bit 3 of mstatus, or
// we create the restore state from the current Priority, which is at most 31.
#[derive(Clone, Copy, Debug)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub struct RestoreState(u32);
impl RestoreState {
const REENTRY_FLAG: u32 = 1 << 31;
/// A lock that disables interrupts below a certain priority.
pub struct PriorityLock(pub Priority);
fn mark_reentry(&mut self) {
self.0 |= Self::REENTRY_FLAG;
impl PriorityLock {
fn current_priority() -> Priority {
crate::interrupt::current_runlevel()
}
fn is_reentry(&self) -> bool {
self.0 & Self::REENTRY_FLAG != 0
/// Prevents interrupts above `level` from firing and returns the
/// current run level.
unsafe fn change_current_level(level: Priority) -> Priority {
unsafe { crate::interrupt::change_current_runlevel(level) }
}
}
impl From<Priority> for RestoreState {
fn from(priority: Priority) -> Self {
Self(priority as _)
}
}
impl RawLock for PriorityLock {
unsafe fn enter(&self) -> RestoreState {
#[cfg(riscv)]
if self.0 == Priority::max() {
return unsafe { SingleCoreInterruptLock.enter() };
}
impl TryFrom<RestoreState> for Priority {
type Error = crate::interrupt::Error;
let prev_interrupt_priority = unsafe { Self::change_current_level(self.0) };
assert!(prev_interrupt_priority <= self.0);
fn try_from(token: RestoreState) -> Result<Self, Self::Error> {
Self::try_from(token.0)
}
}
// Ensure no subsequent memory accesses are reordered to before interrupts are
// disabled.
compiler_fence(Ordering::SeqCst);
mod single_core {
use core::sync::atomic::{Ordering, compiler_fence};
use super::RestoreState;
use crate::interrupt::Priority;
/// Trait for single-core locks.
pub trait RawLock {
unsafe fn enter(&self) -> RestoreState;
unsafe fn exit(&self, token: RestoreState);
unsafe { RestoreState::new(prev_interrupt_priority as _) }
}
/// A lock that disables interrupts below a certain priority.
pub struct PriorityLock(pub Priority);
impl PriorityLock {
fn current_priority() -> Priority {
crate::interrupt::current_runlevel()
unsafe fn exit(&self, token: RestoreState) {
#[cfg(riscv)]
if self.0 == Priority::max() {
return unsafe { SingleCoreInterruptLock.exit(token) };
}
assert!(Self::current_priority() <= self.0);
// Ensure no preceeding memory accesses are reordered to after interrupts are
// enabled.
compiler_fence(Ordering::SeqCst);
/// Prevents interrupts above `level` from firing and returns the
/// current run level.
unsafe fn change_current_level(level: Priority) -> Priority {
unsafe { crate::interrupt::change_current_runlevel(level) }
}
}
impl RawLock for PriorityLock {
    // Raises the current run level to `self.0`, returning the previous level
    // so `exit` can restore it.
    unsafe fn enter(&self) -> RestoreState {
        // On RISC-V, the maximum priority is implemented by disabling
        // interrupts outright rather than raising the run level.
        #[cfg(riscv)]
        if self.0 == Priority::max() {
            return unsafe { InterruptLock.enter() };
        }

        let prev_interrupt_priority = unsafe { Self::change_current_level(self.0) };
        // Locking from above the lock's priority ceiling is a usage error.
        assert!(prev_interrupt_priority <= self.0);

        // Ensure no subsequent memory accesses are reordered to before interrupts are
        // disabled.
        compiler_fence(Ordering::SeqCst);

        RestoreState::from(prev_interrupt_priority)
    }

    // Restores the run level captured by the matching `enter` call.
    unsafe fn exit(&self, token: RestoreState) {
        // Mirror of `enter`: max priority on RISC-V maps to the full
        // interrupt-disable lock.
        #[cfg(riscv)]
        if self.0 == Priority::max() {
            return unsafe { InterruptLock.exit(token) };
        }
        // Releasing from above the lock's priority ceiling is a usage error.
        assert!(Self::current_priority() <= self.0);

        // Ensure no preceding memory accesses are reordered to after interrupts are
        // enabled.
        compiler_fence(Ordering::SeqCst);

        let priority = unwrap!(Priority::try_from(token));
        unsafe {
            Self::change_current_level(priority);
        }
    }
}
/// A lock that disables interrupts.
pub struct InterruptLock;

impl RawLock for InterruptLock {
    // Disables interrupts and captures the previous enable state in the
    // returned token.
    unsafe fn enter(&self) -> RestoreState {
        cfg_if::cfg_if! {
            if #[cfg(riscv)] {
                // `csrrci` atomically clears mstatus bit 3 (MIE) and returns
                // the old mstatus; keep only the old MIE bit as the token.
                let mut mstatus = 0u32;
                unsafe {core::arch::asm!("csrrci {0}, mstatus, 8", inout(reg) mstatus);}
                let token = mstatus & 0b1000;
            } else if #[cfg(xtensa)] {
                // `rsil` raises the interrupt level to 5 and returns the old
                // PS register value, which is restored verbatim in `exit`.
                let token: u32;
                unsafe {core::arch::asm!("rsil {0}, 5", out(reg) token);}
            } else {
                compile_error!("Unsupported architecture")
            }
        };

        // Ensure no subsequent memory accesses are reordered to before interrupts are
        // disabled.
        compiler_fence(Ordering::SeqCst);

        RestoreState(token)
    }

    // Restores the interrupt state captured by the matching `enter` call.
    unsafe fn exit(&self, token: RestoreState) {
        // Ensure no preceding memory accesses are reordered to after interrupts are
        // enabled.
        compiler_fence(Ordering::SeqCst);

        let RestoreState(token) = token;

        cfg_if::cfg_if! {
            if #[cfg(riscv)] {
                // Re-enable interrupts only if they were enabled before
                // `enter` (the saved MIE bit is non-zero).
                if token != 0 {
                    unsafe {
                        riscv::interrupt::enable();
                    }
                }
            } else if #[cfg(xtensa)] {
                // Reserved bits in the PS register, these must be written as 0.
                const RESERVED_MASK: u32 = 0b1111_1111_1111_1000_1111_0000_0000_0000;
                debug_assert!(token & RESERVED_MASK == 0);
                // Write back the saved PS value; `rsync` waits for the
                // register write to take effect.
                unsafe {
                    core::arch::asm!(
                        "wsr.ps {0}",
                        "rsync", in(reg) token)
                }
            } else {
                compile_error!("Unsupported architecture")
            }
        }
    }
}
}
#[cfg(multi_core)]
mod multicore {
    use portable_atomic::{AtomicUsize, Ordering};

    // Sentinel value meaning "no owner".
    // Safety: Ensure that when adding new chips `raw_core` doesn't return this
    // value.
    // FIXME: ensure in HIL tests this is the case!
    const UNUSED_THREAD_ID_VALUE: usize = 0x100;

    /// Returns an identifier for the currently executing core.
    pub fn thread_id() -> usize {
        crate::system::raw_core()
    }

    /// Ownership flag for a cross-core lock: stores the id of the core that
    /// holds the lock, or [`UNUSED_THREAD_ID_VALUE`] when it is free.
    pub(super) struct AtomicLock {
        owner: AtomicUsize,
    }

    impl AtomicLock {
        /// Creates a new, unowned lock.
        pub const fn new() -> Self {
            Self {
                owner: AtomicUsize::new(UNUSED_THREAD_ID_VALUE),
            }
        }

        /// Returns whether the executing core currently owns this lock.
        pub fn is_owned_by_current_thread(&self) -> bool {
            self.is_owned_by(thread_id())
        }

        /// Returns whether `thread` currently owns this lock.
        pub fn is_owned_by(&self, thread: usize) -> bool {
            thread == self.owner.load(Ordering::Relaxed)
        }

        /// Attempts to claim the lock for `new_owner`.
        ///
        /// On failure, returns the id of the core that currently owns it.
        pub fn try_lock(&self, new_owner: usize) -> Result<(), usize> {
            match self.owner.compare_exchange(
                UNUSED_THREAD_ID_VALUE,
                new_owner,
                Ordering::Acquire,
                Ordering::Relaxed,
            ) {
                Ok(_) => Ok(()),
                Err(current_owner) => Err(current_owner),
            }
        }

        /// # Safety:
        ///
        /// This function must only be called if the lock was acquired by the
        /// current thread.
        pub unsafe fn unlock(&self) {
            debug_assert!(self.is_owned_by_current_thread());
            self.owner.store(UNUSED_THREAD_ID_VALUE, Ordering::Release);
        }
    }
}
/// A generic lock that wraps [`single_core::RawLock`] and
/// [`multicore::AtomicLock`] and tracks whether the caller has locked
/// recursively.
struct GenericRawMutex<L: single_core::RawLock> {
    // The raw (interrupt-disabling or priority-raising) lock implementation.
    lock: L,
    // Multi-core: records which core currently holds the lock, which also
    // enables reentry detection.
    #[cfg(multi_core)]
    inner: multicore::AtomicLock,
    // Single-core: a simple flag suffices to detect reentry.
    #[cfg(single_core)]
    is_locked: Cell<bool>,
}

// SAFETY: the interior state is only mutated between `acquire` and `release`,
// while the raw lock is held (interrupts disabled; on multi-core, core
// ownership is tracked via `AtomicLock`).
// NOTE(review): soundness also relies on callers upholding the documented
// acquire/release pairing — confirm against the `acquire`/`release` contracts.
unsafe impl<L: single_core::RawLock> Sync for GenericRawMutex<L> {}
impl<L: single_core::RawLock> GenericRawMutex<L> {
/// Create a new lock.
pub const fn new(lock: L) -> Self {
    Self {
        lock,
        // The lock starts out unowned/unlocked.
        #[cfg(multi_core)]
        inner: multicore::AtomicLock::new(),
        #[cfg(single_core)]
        is_locked: Cell::new(false),
    }
}
/// Acquires the lock.
///
/// # Safety
///
/// - Each release call must be paired with an acquire call.
/// - The returned token must be passed to the corresponding `release` call.
/// - The caller must ensure to release the locks in the reverse order they were acquired.
unsafe fn acquire(&self) -> RestoreState {
    cfg_if::cfg_if! {
        if #[cfg(single_core)] {
            let mut tkn = unsafe { self.lock.enter() };
            let was_locked = self.is_locked.replace(true);
            if was_locked {
                // Already locked in this context: flag the token so `release`
                // knows not to unlock or restore the interrupt state.
                tkn.mark_reentry();
            }
            tkn
        } else if #[cfg(multi_core)] {
            // We acquire the lock inside an interrupt-free context to prevent a subtle
            // race condition:
            // In case an interrupt handler tries to lock the same resource, it could win if
            // the current thread is holding the lock but isn't yet in interrupt-free context.
            // If we maintain non-reentrant semantics, this situation would panic.
            // If we allow reentrancy, the interrupt handler would technically be a different
            // context with the same `current_thread_id`, so it would be allowed to lock the
            // resource in a theoretically incorrect way.
            let try_lock = |current_thread_id| {
                let mut tkn = unsafe { self.lock.enter() };

                match self.inner.try_lock(current_thread_id) {
                    Ok(()) => Some(tkn),
                    Err(owner) if owner == current_thread_id => {
                        // This core already owns the lock: mark the token as a
                        // reentrant acquisition.
                        tkn.mark_reentry();
                        Some(tkn)
                    }
                    Err(_) => {
                        // Held by the other core: back off (restore interrupt
                        // state) before spinning again.
                        unsafe { self.lock.exit(tkn) };
                        None
                    }
                }
            };

            let current_thread_id = multicore::thread_id();
            // Spin until the other core releases the lock.
            loop {
                if let Some(token) = try_lock(current_thread_id) {
                    return token;
                }
            }
        }
    }
}
/// Releases the lock.
///
/// # Safety
///
/// - This function must only be called if the lock was acquired by the current thread.
/// - The caller must ensure to release the locks in the reverse order they were acquired.
/// - Each release call must be paired with an acquire call.
unsafe fn release(&self, token: RestoreState) {
let priority = unwrap!(Priority::try_from(token.inner()));
unsafe {
if !token.is_reentry() {
#[cfg(multi_core)]
self.inner.unlock();
#[cfg(single_core)]
self.is_locked.set(false);
self.lock.exit(token)
}
Self::change_current_level(priority);
}
}
/// Executes `f` while holding this lock.
///
/// Note that this function is not reentrant, calling it reentrantly will
/// panic.
pub fn lock_non_reentrant<R>(&self, f: impl FnOnce() -> R) -> R {
    // The guard asserts non-reentrancy on creation and releases the lock
    // when dropped, after `f` has returned.
    let guard = LockGuard::new_non_reentrant(self);
    let result = f();
    drop(guard);
    result
}
/// Executes `f` while holding this lock; reentrant calls are allowed.
pub fn lock<R>(&self, f: impl FnOnce() -> R) -> R {
    // The guard releases the lock when dropped, after `f` has returned.
    let guard = LockGuard::new_reentrant(self);
    let result = f();
    drop(guard);
    result
}
}
/// A mutual exclusion primitive.
///
/// This lock disables interrupts on the current core while locked.
#[cfg_attr(
    multi_core,
    doc = r#"It needs a bit of memory, but it does not take a global critical
section, making it preferable for use in multi-core systems."#
)]
pub struct RawMutex {
    // The actual locking is implemented by the generic wrapper around the
    // interrupt-disabling raw lock.
    inner: GenericRawMutex<single_core::InterruptLock>,
}
impl Default for RawMutex {
fn default() -> Self {
Self::new()
}
}
impl RawMutex {
    /// Creates a new, unlocked mutex.
    pub const fn new() -> Self {
        Self {
            inner: GenericRawMutex::new(single_core::InterruptLock),
        }
    }

    /// Acquires the lock, returning a token that must later be handed back to
    /// [`Self::release`].
    ///
    /// # Safety
    ///
    /// - Each release call must be paired with an acquire call.
    /// - The returned token must be passed to the corresponding `release` call.
    /// - The caller must ensure to release the locks in the reverse order they were acquired.
    pub unsafe fn acquire(&self) -> RestoreState {
        unsafe { self.inner.acquire() }
    }

    /// Releases the lock using the token produced by [`Self::acquire`].
    ///
    /// # Safety
    ///
    /// - This function must only be called if the lock was acquired by the current thread.
    /// - The caller must ensure to release the locks in the reverse order they were acquired.
    /// - Each release call must be paired with an acquire call.
    pub unsafe fn release(&self, token: RestoreState) {
        unsafe { self.inner.release(token) }
    }

    /// Executes `f` while holding the lock.
    ///
    /// Note that this function is not reentrant, calling it reentrantly will
    /// panic.
    pub fn lock_non_reentrant<R>(&self, f: impl FnOnce() -> R) -> R {
        self.inner.lock_non_reentrant(f)
    }

    /// Executes `f` while holding the lock; reentrant calls are allowed.
    pub fn lock<R>(&self, f: impl FnOnce() -> R) -> R {
        self.inner.lock(f)
    }
}
// Allows `RawMutex` to be used with `embassy-sync` blocking primitives.
unsafe impl embassy_sync::blocking_mutex::raw::RawMutex for RawMutex {
    // Interior mutability in this const is intentional: `INIT` is a fresh,
    // unlocked mutex each time it is used.
    #[allow(clippy::declare_interior_mutable_const)]
    const INIT: Self = Self::new();

    fn lock<R>(&self, f: impl FnOnce() -> R) -> R {
        self.inner.lock(f)
    }
}
/// A mutual exclusion primitive that only disables a limited range of
@ -411,14 +63,14 @@ unsafe impl embassy_sync::blocking_mutex::raw::RawMutex for RawMutex {
///
/// Trying to acquire or release the lock at a higher priority level will panic.
pub struct RawPriorityLimitedMutex {
inner: GenericRawMutex<single_core::PriorityLock>,
inner: GenericRawMutex<PriorityLock>,
}
impl RawPriorityLimitedMutex {
/// Create a new lock that is accessible at or below the given `priority`.
pub const fn new(priority: Priority) -> Self {
Self {
inner: GenericRawMutex::new(single_core::PriorityLock(priority)),
inner: GenericRawMutex::new(PriorityLock(priority)),
}
}
@ -437,81 +89,22 @@ unsafe impl embassy_sync::blocking_mutex::raw::RawMutex for RawPriorityLimitedMu
}
}
/// Data protected by a [RawMutex].
///
/// This is largely equivalent to a `Mutex<RefCell<T>>`, but accessing the inner
/// data doesn't hold a critical section on multi-core systems.
pub struct Locked<T> {
    lock_state: RawMutex,
    data: UnsafeCell<T>,
}

impl<T> Locked<T> {
    /// Wraps `data` in a new, unlocked instance.
    pub const fn new(data: T) -> Self {
        Self {
            data: UnsafeCell::new(data),
            lock_state: RawMutex::new(),
        }
    }

    /// Calls `f` with exclusive access to the protected data.
    ///
    /// Calling this reentrantly will panic.
    pub fn with<R>(&self, f: impl FnOnce(&mut T) -> R) -> R {
        self.lock_state.lock_non_reentrant(|| {
            // SAFETY: the non-reentrant lock guarantees no other reference to
            // `data` exists while `f` runs.
            let data = unsafe { &mut *self.data.get() };
            f(data)
        })
    }
}

// NOTE(review): this impl is unconditional; on multi-core targets, sharing a
// `Locked<T>` hands out `&mut T` on another core, which normally requires
// `T: Send` — confirm whether a `T: Send` bound is intended here.
unsafe impl<T> Sync for Locked<T> {}
// Guard that holds a `GenericRawMutex` locked and releases it on drop.
struct LockGuard<'a, L: single_core::RawLock> {
    // The mutex to release when this guard is dropped.
    lock: &'a GenericRawMutex<L>,
    // Restore token produced by `acquire`, consumed by `release` on drop.
    token: RestoreState,
}

impl<'a, L: single_core::RawLock> LockGuard<'a, L> {
    // Acquires the lock, panicking if it is already held (i.e. on reentry).
    fn new_non_reentrant(lock: &'a GenericRawMutex<L>) -> Self {
        let this = Self::new_reentrant(lock);
        assert!(!this.token.is_reentry(), "lock is not reentrant");
        this
    }

    // Acquires the lock, allowing reentry from the same context.
    fn new_reentrant(lock: &'a GenericRawMutex<L>) -> Self {
        let token = unsafe {
            // SAFETY: the same lock will be released when dropping the guard.
            // This ensures that the lock is released on the same thread, in the reverse
            // order it was acquired.
            lock.acquire()
        };

        Self { lock, token }
    }
}

impl<L: single_core::RawLock> Drop for LockGuard<'_, L> {
    fn drop(&mut self) {
        // SAFETY: `token` was produced by the paired `acquire` on `self.lock`.
        unsafe { self.lock.release(self.token) };
    }
}
#[cfg(impl_critical_section)]
mod critical_section {
struct CriticalSection;
critical_section::set_impl!(CriticalSection);
static CRITICAL_SECTION: super::RawMutex = super::RawMutex::new();
static CRITICAL_SECTION: esp_sync::RawMutex = esp_sync::RawMutex::new();
unsafe impl critical_section::Impl for CriticalSection {
unsafe fn acquire() -> critical_section::RawRestoreState {
unsafe { CRITICAL_SECTION.acquire().0 }
unsafe { CRITICAL_SECTION.acquire().inner() }
}
unsafe fn release(token: critical_section::RawRestoreState) {
unsafe {
CRITICAL_SECTION.release(super::RestoreState(token));
CRITICAL_SECTION.release(esp_sync::RestoreState::new(token));
}
}
}

View File

@ -1,8 +1,6 @@
//! # System Control
use core::cell::RefCell;
use critical_section::{CriticalSection, Mutex};
use esp_sync::NonReentrantMutex;
use crate::peripherals::SYSTEM;
@ -225,8 +223,20 @@ impl Peripheral {
}
}
static PERIPHERAL_REF_COUNT: Mutex<RefCell<[usize; Peripheral::COUNT]>> =
Mutex::new(RefCell::new([0; Peripheral::COUNT]));
// Reference counts tracking enable/disable calls for each peripheral,
// indexed by `Peripheral as usize`.
struct RefCounts {
    counts: [usize; Peripheral::COUNT],
}

impl RefCounts {
    // All peripherals start with a reference count of zero.
    pub const fn new() -> Self {
        Self {
            counts: [0; Peripheral::COUNT],
        }
    }
}
static PERIPHERAL_REF_COUNT: NonReentrantMutex<RefCounts> =
NonReentrantMutex::new(RefCounts::new());
/// Disable all peripherals.
///
@ -234,12 +244,12 @@ static PERIPHERAL_REF_COUNT: Mutex<RefCell<[usize; Peripheral::COUNT]>> =
#[cfg_attr(not(feature = "rt"), expect(dead_code))]
pub(crate) fn disable_peripherals() {
// Take the critical section up front to avoid taking it multiple times.
critical_section::with(|cs| {
PERIPHERAL_REF_COUNT.with(|refcounts| {
for p in Peripheral::ALL {
if Peripheral::KEEP_ENABLED.contains(p) {
continue;
}
PeripheralClockControl::enable_forced_with_cs(*p, false, true, cs);
PeripheralClockControl::enable_forced_with_counts(*p, false, true, refcounts);
}
})
}
@ -278,22 +288,22 @@ impl Drop for PeripheralGuard {
pub(crate) struct GenericPeripheralGuard<const P: u8> {}
impl<const P: u8> GenericPeripheralGuard<P> {
pub(crate) fn new_with(init: fn(CriticalSection<'_>)) -> Self {
pub(crate) fn new_with(init: fn()) -> Self {
let peripheral = unwrap!(Peripheral::try_from(P));
critical_section::with(|cs| {
if !Peripheral::KEEP_ENABLED.contains(&peripheral)
&& PeripheralClockControl::enable_with_cs(peripheral, cs)
{
PeripheralClockControl::reset(peripheral);
init(cs);
}
});
if !Peripheral::KEEP_ENABLED.contains(&peripheral) {
PERIPHERAL_REF_COUNT.with(|ref_counts| {
if PeripheralClockControl::enable_with_counts(peripheral, ref_counts) {
unsafe { PeripheralClockControl::reset_racey(peripheral) };
init();
}
});
}
Self {}
}
pub(crate) fn new() -> Self {
Self::new_with(|_| {})
Self::new_with(|| {})
}
}
@ -321,7 +331,7 @@ pub(crate) struct PeripheralClockControl;
#[cfg(not(any(esp32c6, esp32h2)))]
impl PeripheralClockControl {
fn enable_internal(peripheral: Peripheral, enable: bool, _cs: CriticalSection<'_>) {
unsafe fn enable_internal_racey(peripheral: Peripheral, enable: bool) {
debug!("Enable {:?} {}", peripheral, enable);
let system = SYSTEM::regs();
@ -486,19 +496,11 @@ impl PeripheralClockControl {
}
}
}
/// Resets the given peripheral
pub(crate) fn reset(peripheral: Peripheral) {
debug!("Reset {:?}", peripheral);
assert_peri_reset(peripheral, true);
assert_peri_reset(peripheral, false);
}
}
#[cfg(any(esp32c6, esp32h2))]
impl PeripheralClockControl {
fn enable_internal(peripheral: Peripheral, enable: bool, _cs: CriticalSection<'_>) {
unsafe fn enable_internal_racey(peripheral: Peripheral, enable: bool) {
debug!("Enable {:?} {}", peripheral, enable);
let system = SYSTEM::regs();
@ -669,19 +671,11 @@ impl PeripheralClockControl {
}
}
}
/// Resets the given peripheral
pub(crate) fn reset(peripheral: Peripheral) {
debug!("Reset {:?}", peripheral);
assert_peri_reset(peripheral, true);
assert_peri_reset(peripheral, false);
}
}
#[cfg(not(any(esp32c6, esp32h2)))]
/// Resets the given peripheral
pub(crate) fn assert_peri_reset(peripheral: Peripheral, reset: bool) {
unsafe fn assert_peri_reset_racey(peripheral: Peripheral, reset: bool) {
let system = SYSTEM::regs();
#[cfg(esp32)]
@ -692,7 +686,7 @@ pub(crate) fn assert_peri_reset(peripheral: Peripheral, reset: bool) {
#[cfg(any(esp32c2, esp32c3, esp32s2, esp32s3))]
let perip_rst_en1 = system.perip_rst_en1();
critical_section::with(|_cs| match peripheral {
match peripheral {
#[cfg(soc_has_spi2)]
Peripheral::Spi2 => {
perip_rst_en0.modify(|_, w| w.spi2_rst().bit(reset));
@ -845,13 +839,17 @@ pub(crate) fn assert_peri_reset(peripheral: Peripheral, reset: bool) {
Peripheral::Uhci0 => {
perip_rst_en0.modify(|_, w| w.uhci0_rst().bit(reset));
}
});
}
}
#[cfg(any(esp32c6, esp32h2))]
fn assert_peri_reset(peripheral: Peripheral, reset: bool) {
unsafe fn assert_peri_reset_racey(peripheral: Peripheral, reset: bool) {
let system = SYSTEM::regs();
// No need to lock, different peripherals' bits are in separate registers. In theory this may
// race with accessing the clk_enable bits, but the peripheral singleton pattern, as well as the
// general usage patterns of this code should prevent that.
match peripheral {
#[cfg(soc_has_spi2)]
Peripheral::Spi2 => {
@ -992,7 +990,7 @@ impl PeripheralClockControl {
///
/// Returns `true` if it actually enabled the peripheral.
pub(crate) fn enable(peripheral: Peripheral) -> bool {
Self::enable_forced(peripheral, true, false)
PERIPHERAL_REF_COUNT.with(|ref_counts| Self::enable_with_counts(peripheral, ref_counts))
}
/// Enables the given peripheral.
@ -1001,8 +999,8 @@ impl PeripheralClockControl {
/// is only enabled with the first call attempt to enable it.
///
/// Returns `true` if it actually enabled the peripheral.
pub(crate) fn enable_with_cs(peripheral: Peripheral, cs: CriticalSection<'_>) -> bool {
Self::enable_forced_with_cs(peripheral, true, false, cs)
fn enable_with_counts(peripheral: Peripheral, ref_counts: &mut RefCounts) -> bool {
Self::enable_forced_with_counts(peripheral, true, false, ref_counts)
}
/// Disables the given peripheral.
@ -1014,21 +1012,18 @@ impl PeripheralClockControl {
///
/// Before disabling a peripheral it will also get reset
pub(crate) fn disable(peripheral: Peripheral) -> bool {
Self::enable_forced(peripheral, false, false)
PERIPHERAL_REF_COUNT.with(|ref_counts| {
Self::enable_forced_with_counts(peripheral, false, false, ref_counts)
})
}
pub(crate) fn enable_forced(peripheral: Peripheral, enable: bool, force: bool) -> bool {
critical_section::with(|cs| Self::enable_forced_with_cs(peripheral, enable, force, cs))
}
pub(crate) fn enable_forced_with_cs(
fn enable_forced_with_counts(
peripheral: Peripheral,
enable: bool,
force: bool,
cs: CriticalSection<'_>,
ref_counts: &mut RefCounts,
) -> bool {
let mut ref_counts = PERIPHERAL_REF_COUNT.borrow_ref_mut(cs);
let ref_count = &mut ref_counts[peripheral as usize];
let ref_count = &mut ref_counts.counts[peripheral as usize];
if !force {
let prev = *ref_count;
if enable {
@ -1050,13 +1045,28 @@ impl PeripheralClockControl {
}
if !enable {
Self::reset(peripheral);
unsafe { Self::reset_racey(peripheral) };
}
Self::enable_internal(peripheral, enable, cs);
unsafe { Self::enable_internal_racey(peripheral, enable) };
true
}
/// Resets the given peripheral by asserting, then de-asserting, its reset.
///
/// # Safety
///
/// Callers must provide mutual exclusion against concurrent clock/reset
/// register accesses for this peripheral (e.g. `reset` takes the
/// `PERIPHERAL_REF_COUNT` lock for this purpose).
pub(crate) unsafe fn reset_racey(peripheral: Peripheral) {
    debug!("Reset {:?}", peripheral);

    unsafe {
        assert_peri_reset_racey(peripheral, true);
        assert_peri_reset_racey(peripheral, false);
    }
}
/// Resets the given peripheral.
///
/// Holds the peripheral ref-count lock for the duration of the reset so it
/// cannot race with concurrent enable/disable calls.
pub(crate) fn reset(peripheral: Peripheral) {
    PERIPHERAL_REF_COUNT.with(|_| unsafe { Self::reset_racey(peripheral) })
}
}
#[cfg(any(esp32, esp32s3))]

View File

@ -19,12 +19,13 @@
use core::{fmt::Debug, marker::PhantomData};
use esp_sync::RawMutex;
use super::{Error, Timer as _};
use crate::{
asynch::AtomicWaker,
interrupt::{self, InterruptHandler},
peripherals::{Interrupt, SYSTIMER},
sync::RawMutex,
system::{Cpu, Peripheral as PeripheralEnable, PeripheralClockControl},
time::{Duration, Instant},
};

View File

@ -93,7 +93,7 @@ cfg_if::cfg_if! {
// and S2 where the effective interrupt enable register (config) is not shared between
// the timers.
if #[cfg(all(timergroup_timg_has_timer1, not(any(esp32, esp32s2))))] {
use crate::sync::RawMutex;
use esp_sync::RawMutex;
static INT_ENA_LOCK: [RawMutex; NUM_TIMG] = [const { RawMutex::new() }; NUM_TIMG];
}
}

View File

@ -15,8 +15,7 @@
use core::{future::poll_fn, marker::PhantomData, ptr::NonNull, task::Context};
use embassy_sync::waitqueue::WakerRegistration;
use crate::sync::Locked;
use esp_sync::NonReentrantMutex;
/// Queue driver operations.
///
@ -80,6 +79,9 @@ struct Inner<T: Sync + Send> {
suspend_waker: WakerRegistration,
}
unsafe impl<T: Sync + Send> Send for Inner<T> {}
unsafe impl<T: Sync + Send> Sync for Inner<T> {}
impl<T: Sync + Send> Inner<T> {
/// Places a work item at the end of the queue.
fn enqueue(&mut self, ptr: NonNull<WorkItem<T>>) {
@ -337,14 +339,14 @@ impl<T: Sync + Send> Inner<T> {
/// A generic work queue.
pub(crate) struct WorkQueue<T: Sync + Send> {
inner: Locked<Inner<T>>,
inner: NonReentrantMutex<Inner<T>>,
}
impl<T: Sync + Send> WorkQueue<T> {
/// Creates a new `WorkQueue`.
pub const fn new() -> Self {
Self {
inner: Locked::new(Inner {
inner: NonReentrantMutex::new(Inner {
head: None,
tail: None,
current: None,

12
esp-preempt/CHANGELOG.md Normal file
View File

@ -0,0 +1,12 @@
# Changelog
All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## [Unreleased]
### Added
- Initial release (#3855)

View File

@ -30,6 +30,7 @@ allocator-api2 = { version = "0.3.0", default-features = false, features = ["all
document-features = "0.2.11"
esp-alloc = { version = "0.8.0", path = "../esp-alloc", optional = true }
esp-config = { version = "0.5.0", path = "../esp-config" }
esp-sync = { version = "0.0.0", path = "../esp-sync" }
esp-radio-preempt-driver = { version = "0.0.1", path = "../esp-radio-preempt-driver" }
portable-atomic = { version = "1.11.0", default-features = false }

View File

@ -31,10 +31,10 @@ use core::ffi::c_void;
use allocator_api2::boxed::Box;
use esp_hal::{
Blocking,
sync::Locked,
time::{Duration, Instant, Rate},
timer::{AnyTimer, PeriodicTimer},
};
use esp_sync::NonReentrantMutex;
use crate::{task::Context, timer::TIMER};
@ -113,6 +113,8 @@ struct SchedulerState {
to_delete: *mut Context,
}
unsafe impl Send for SchedulerState {}
impl SchedulerState {
const fn new() -> Self {
Self {
@ -233,7 +235,8 @@ fn usleep(us: u32) {
}
}
static SCHEDULER_STATE: Locked<SchedulerState> = Locked::new(SchedulerState::new());
static SCHEDULER_STATE: NonReentrantMutex<SchedulerState> =
NonReentrantMutex::new(SchedulerState::new());
struct Scheduler {}

View File

@ -3,15 +3,13 @@
mod arch_specific;
pub(crate) use arch_specific::*;
use esp_hal::{
interrupt::{InterruptHandler, Priority},
sync::Locked,
};
use esp_hal::interrupt::{InterruptHandler, Priority};
use esp_sync::NonReentrantMutex;
use crate::TimeBase;
/// The timer responsible for time slicing.
pub(crate) static TIMER: Locked<Option<TimeBase>> = Locked::new(None);
pub(crate) static TIMER: NonReentrantMutex<Option<TimeBase>> = NonReentrantMutex::new(None);
pub(crate) fn initialized() -> bool {
TIMER.with(|timer| timer.is_some())

View File

@ -23,20 +23,22 @@ test = false
[dependencies]
document-features = "0.2.11"
# Unstable dependencies that are not (strictly) part of the public API
esp-sync = { version = "0.0.0", path = "../esp-sync", optional = true }
# Optional dependencies
critical-section = { version = "1.2.0", optional = true }
portable-atomic = { version = "1.11.0", optional = true, default-features = false }
# Logging interfaces, they are mutually exclusive so they need to be behind separate features.
defmt = { version = "1.0.1", optional = true }
log-04 = { package = "log", version = "0.4.27", optional = true }
defmt = { version = "1.0.1", optional = true }
log-04 = { package = "log", version = "0.4.27", optional = true }
[build-dependencies]
esp-metadata-generated = { version = "0.1.0", path = "../esp-metadata-generated", features = ["build-script"] }
log-04 = { package = "log", version = "0.4.27" }
log-04 = { package = "log", version = "0.4.27" }
[features]
default = ["auto", "colors", "critical-section"]
default = ["auto", "colors", "critical-section"]
esp32 = ["esp-metadata-generated/esp32"]
esp32c2 = ["esp-metadata-generated/esp32c2"]
@ -47,7 +49,7 @@ esp32s2 = ["esp-metadata-generated/esp32s2"]
esp32s3 = ["esp-metadata-generated/esp32s3"]
## Use a critical section around print calls. This ensures that the output is consistent.
critical-section = ["dep:critical-section"]
critical-section = ["dep:esp-sync"]
## Prints the timestamp in the log message.
##
## This option requires the following function to be implemented:

View File

@ -2,9 +2,11 @@
// Implementation taken from defmt-rtt, with a custom framing prefix
#[cfg(feature = "critical-section")]
use critical_section::RestoreState;
use esp_sync::RestoreState;
use super::{LockToken, PrinterImpl};
#[cfg(feature = "critical-section")]
use crate::LOCK;
/// Global logger lock.
#[cfg(feature = "critical-section")]
@ -20,7 +22,7 @@ unsafe impl defmt::Logger for Logger {
#[cfg(feature = "critical-section")]
unsafe {
// safety: Must be paired with corresponding call to release(), see below
let restore = critical_section::acquire();
let restore = LOCK.acquire();
// safety: accessing the `static mut` is OK because we have acquired a critical
// section.
@ -70,7 +72,7 @@ unsafe impl defmt::Logger for Logger {
let restore = CS_RESTORE;
// safety: Must be paired with corresponding call to acquire(), see above
critical_section::release(restore);
LOCK.release(restore);
}
}
}

View File

@ -469,36 +469,26 @@ mod uart_printer {
}
}
#[cfg(not(feature = "critical-section"))]
use core::marker::PhantomData;
#[cfg(not(feature = "critical-section"))]
type LockInner<'a> = PhantomData<&'a ()>;
#[cfg(feature = "critical-section")]
type LockInner<'a> = critical_section::CriticalSection<'a>;
#[derive(Clone, Copy)]
struct LockToken<'a>(LockInner<'a>);
struct LockToken<'a>(PhantomData<&'a ()>);
impl LockToken<'_> {
#[allow(unused)]
unsafe fn conjure() -> Self {
unsafe {
#[cfg(feature = "critical-section")]
let inner = critical_section::CriticalSection::new();
#[cfg(not(feature = "critical-section"))]
let inner = PhantomData;
LockToken(inner)
}
LockToken(PhantomData)
}
}
#[cfg(feature = "critical-section")]
static LOCK: esp_sync::RawMutex = esp_sync::RawMutex::new();
/// Runs the callback in a critical section, if enabled.
#[inline]
fn with<R>(f: impl FnOnce(LockToken) -> R) -> R {
#[cfg(feature = "critical-section")]
return critical_section::with(|cs| f(LockToken(cs)));
return LOCK.lock(|| f(unsafe { LockToken::conjure() }));
#[cfg(not(feature = "critical-section"))]
f(unsafe { LockToken::conjure() })

View File

@ -16,7 +16,6 @@ test = false
[dependencies]
esp-hal = { version = "1.0.0-rc.0", path = "../esp-hal", default-features = false, features = ["requires-unstable"] }
critical-section = "1.2.0"
cfg-if = "1.0.0"
portable-atomic = { version = "1.11.0", default-features = false }
enumset = { version = "1.1.6", default-features = false, optional = true }
@ -32,6 +31,7 @@ document-features = "0.2.11"
esp-alloc = { version = "0.8.0", path = "../esp-alloc", optional = true }
esp-config = { version = "0.5.0", path = "../esp-config" }
esp-metadata-generated = { version = "0.1.0", path = "../esp-metadata-generated" }
esp-sync = { version = "0.0.0", path = "../esp-sync" }
esp-wifi-sys = "0.7.1"
num-derive = "0.4.2"
num-traits = { version = "0.2.19", default-features = false }
@ -40,6 +40,7 @@ procmacros = { version = "0.19.0", package = "esp-hal-procmacros", path = "../es
xtensa-lx-rt = { version = "0.20.0", path = "../xtensa-lx-rt", optional = true }
byte = { version = "0.2.7", optional = true }
ieee802154 = { version = "0.6.1", optional = true }
heapless = "0.9"
# Optional dependencies enabling ecosystem features
serde = { version = "1.0.218", default-features = false, features = ["derive"], optional = true }
@ -164,7 +165,7 @@ serde = ["dep:serde", "enumset?/serde"]
log-04 = ["dep:log-04", "esp-hal/log-04", "esp-wifi-sys/log"]
## Enable logging output using `defmt` and implement `defmt::Format` on certain types.
defmt = ["dep:defmt", "smoltcp?/defmt", "esp-hal/defmt", "bt-hci?/defmt", "esp-wifi-sys/defmt", "enumset/defmt"]
defmt = ["dep:defmt", "smoltcp?/defmt", "esp-hal/defmt", "bt-hci?/defmt", "esp-wifi-sys/defmt", "enumset/defmt", "heapless/defmt", "esp-sync/defmt"]
#! ### Unstable APIs
#! Unstable APIs are drivers and features that are not yet ready for general use.

View File

@ -1,6 +1,7 @@
use alloc::boxed::Box;
use core::ptr::{addr_of, addr_of_mut};
use esp_sync::RawMutex;
use esp_wifi_sys::c_types::{c_char, c_void};
use portable_atomic::{AtomicBool, Ordering};
@ -74,10 +75,7 @@ extern "C" fn notify_host_recv(data: *mut u8, len: u16) -> i32 {
data: Box::from(data),
};
critical_section::with(|cs| {
let mut queue = super::BT_RECEIVE_QUEUE.borrow_ref_mut(cs);
queue.push_back(packet);
});
super::BT_STATE.with(|state| state.rx_queue.push_back(packet));
super::dump_packet_info(data);
@ -86,35 +84,35 @@ extern "C" fn notify_host_recv(data: *mut u8, len: u16) -> i32 {
0
}
type InterruptsFlagType = u32;
// This is fine, we're only accessing it inside a critical section (protected by INTERRUPT_LOCK).
static mut G_INTER_FLAGS: heapless::Vec<esp_sync::RestoreState, 10> = heapless::Vec::new();
static mut G_INTER_FLAGS: [InterruptsFlagType; 10] = [0; 10];
static mut INTERRUPT_DISABLE_CNT: usize = 0;
static INTERRUPT_LOCK: RawMutex = RawMutex::new();
#[ram]
unsafe extern "C" fn interrupt_enable() {
#[allow(static_mut_refs)]
unsafe {
INTERRUPT_DISABLE_CNT -= 1;
let flags = G_INTER_FLAGS[INTERRUPT_DISABLE_CNT];
trace!("interrupt_enable {}", flags);
critical_section::release(core::mem::transmute::<
InterruptsFlagType,
critical_section::RestoreState,
>(flags));
let flags = unwrap!(
G_INTER_FLAGS.pop(),
"interrupt_enable called without prior interrupt_disable"
);
trace!("interrupt_enable {:?}", flags);
INTERRUPT_LOCK.release(flags);
}
}
#[ram]
unsafe extern "C" fn interrupt_disable() {
trace!("interrupt_disable");
#[allow(static_mut_refs)]
unsafe {
let flags = core::mem::transmute::<critical_section::RestoreState, InterruptsFlagType>(
critical_section::acquire(),
let flags = INTERRUPT_LOCK.acquire();
unwrap!(
G_INTER_FLAGS.push(flags),
"interrupt_disable was called too many times"
);
G_INTER_FLAGS[INTERRUPT_DISABLE_CNT] = flags;
INTERRUPT_DISABLE_CNT += 1;
trace!("interrupt_disable {}", flags);
trace!("interrupt_disable {:?}", flags);
}
}
@ -430,7 +428,7 @@ pub(crate) fn ble_init() {
#[cfg(coex)]
crate::binary::include::coex_enable();
crate::common_adapter::chip_specific::phy_enable();
crate::common_adapter::phy_enable();
#[cfg(esp32)]
{
@ -465,7 +463,7 @@ pub(crate) fn ble_deinit() {
unsafe {
btdm_controller_deinit();
crate::common_adapter::chip_specific::phy_disable();
crate::common_adapter::phy_disable();
}
}
/// Sends HCI data to the BLE controller.

View File

@ -9,10 +9,10 @@ pub(crate) mod btdm;
pub(crate) mod npl;
use alloc::{boxed::Box, collections::vec_deque::VecDeque, vec::Vec};
use core::{cell::RefCell, mem::MaybeUninit};
use core::mem::MaybeUninit;
pub(crate) use ble::{ble_deinit, ble_init, send_hci};
use critical_section::Mutex;
use esp_sync::NonReentrantMutex;
#[cfg(btdm)]
use self::btdm as ble;
@ -36,9 +36,15 @@ pub(crate) unsafe extern "C" fn free(ptr: *mut crate::binary::c_types::c_void) {
unsafe { crate::compat::malloc::free(ptr.cast()) }
}
// Stores received packets until the BLE stack dequeues them
static BT_RECEIVE_QUEUE: Mutex<RefCell<VecDeque<ReceivedPacket>>> =
Mutex::new(RefCell::new(VecDeque::new()));
// Shared state of the BLE glue code, protected by the `BT_STATE` mutex.
struct BleState {
    // Received packets, queued until the BLE stack dequeues them.
    pub rx_queue: VecDeque<ReceivedPacket>,
    // HCI bytes buffered from a packet that was only partially read by the
    // caller.
    pub hci_read_data: Vec<u8>,
}

static BT_STATE: NonReentrantMutex<BleState> = NonReentrantMutex::new(BleState {
    rx_queue: VecDeque::new(),
    hci_read_data: Vec::new(),
});
static mut HCI_OUT_COLLECTOR: MaybeUninit<HciOutCollector> = MaybeUninit::uninit();
@ -107,8 +113,6 @@ impl HciOutCollector {
}
}
static BLE_HCI_READ_DATA: Mutex<RefCell<Vec<u8>>> = Mutex::new(RefCell::new(Vec::new()));
#[derive(Debug, Clone)]
/// Represents a received BLE packet.
#[instability::unstable]
@ -127,51 +131,36 @@ impl defmt::Format for ReceivedPacket {
/// Checks if there is any HCI data available to read.
#[instability::unstable]
pub fn have_hci_read_data() -> bool {
critical_section::with(|cs| {
let queue = BT_RECEIVE_QUEUE.borrow_ref_mut(cs);
let hci_read_data = BLE_HCI_READ_DATA.borrow_ref(cs);
!queue.is_empty() || !hci_read_data.is_empty()
})
BT_STATE.with(|state| !state.rx_queue.is_empty() || !state.hci_read_data.is_empty())
}
pub(crate) fn read_next(data: &mut [u8]) -> usize {
critical_section::with(|cs| {
let mut queue = BT_RECEIVE_QUEUE.borrow_ref_mut(cs);
match queue.pop_front() {
Some(packet) => {
data[..packet.data.len()].copy_from_slice(&packet.data[..packet.data.len()]);
packet.data.len()
}
None => 0,
}
})
if let Some(packet) = BT_STATE.with(|state| state.rx_queue.pop_front()) {
data[..packet.data.len()].copy_from_slice(&packet.data[..packet.data.len()]);
packet.data.len()
} else {
0
}
}
/// Reads the next HCI packet from the BLE controller.
#[instability::unstable]
pub fn read_hci(data: &mut [u8]) -> usize {
critical_section::with(|cs| {
let mut hci_read_data = BLE_HCI_READ_DATA.borrow_ref_mut(cs);
if hci_read_data.is_empty() {
let mut queue = BT_RECEIVE_QUEUE.borrow_ref_mut(cs);
if let Some(packet) = queue.pop_front() {
hci_read_data.extend_from_slice(&packet.data);
}
BT_STATE.with(|state| {
if state.hci_read_data.is_empty()
&& let Some(packet) = state.rx_queue.pop_front()
{
state.hci_read_data.extend_from_slice(&packet.data);
}
let l = usize::min(hci_read_data.len(), data.len());
data[..l].copy_from_slice(&hci_read_data[..l]);
hci_read_data.drain(..l);
let l = usize::min(state.hci_read_data.len(), data.len());
data[..l].copy_from_slice(&state.hci_read_data[..l]);
state.hci_read_data.drain(..l);
l
})
}
fn dump_packet_info(_buffer: &[u8]) {
#[cfg(dump_packets)]
critical_section::with(|_cs| {
info!("@HCIFRAME {:?}", _buffer);
});
info!("@HCIFRAME {:?}", _buffer);
}

View File

@ -619,13 +619,14 @@ unsafe extern "C" fn ble_npl_get_time_forever() -> u32 {
unsafe extern "C" fn ble_npl_hw_exit_critical(mask: u32) {
trace!("ble_npl_hw_exit_critical {}", mask);
unsafe {
critical_section::release(transmute::<u32, critical_section::RestoreState>(mask));
let token = esp_sync::RestoreState::new(mask);
crate::ESP_RADIO_LOCK.release(token);
}
}
unsafe extern "C" fn ble_npl_hw_enter_critical() -> u32 {
trace!("ble_npl_hw_enter_critical");
unsafe { transmute::<critical_section::RestoreState, u32>(critical_section::acquire()) }
unsafe { crate::ESP_RADIO_LOCK.acquire().inner() }
}
unsafe extern "C" fn ble_npl_hw_set_isr(_no: i32, _mask: u32) {
@ -1152,7 +1153,7 @@ pub(crate) fn ble_init() {
os_msys_init();
}
crate::common_adapter::chip_specific::phy_enable();
crate::common_adapter::phy_enable();
// init bb
bt_bb_v2_init_cmplx(1);
@ -1275,7 +1276,7 @@ pub(crate) fn ble_deinit() {
npl::esp_unregister_ext_funcs();
crate::common_adapter::chip_specific::phy_disable();
crate::common_adapter::phy_disable();
}
}
@ -1349,8 +1350,7 @@ unsafe extern "C" fn ble_hs_hci_rx_evt(cmd: *const u8, arg: *const c_void) -> i3
let payload = unsafe { core::slice::from_raw_parts(cmd.offset(2), len) };
trace!("$ pld = {:?}", payload);
critical_section::with(|cs| {
let mut queue = super::BT_RECEIVE_QUEUE.borrow_ref_mut(cs);
super::BT_STATE.with(|state| {
let mut data = [0u8; 256];
data[0] = 0x04; // this is an event
@ -1358,7 +1358,7 @@ unsafe extern "C" fn ble_hs_hci_rx_evt(cmd: *const u8, arg: *const c_void) -> i3
data[2] = len as u8;
data[3..][..len].copy_from_slice(payload);
queue.push_back(ReceivedPacket {
state.rx_queue.push_back(ReceivedPacket {
data: Box::from(&data[..len + 3]),
});
@ -1381,14 +1381,13 @@ unsafe extern "C" fn ble_hs_rx_data(om: *const OsMbuf, arg: *const c_void) -> i3
let len = unsafe { (*om).om_len };
let data_slice = unsafe { core::slice::from_raw_parts(data_ptr, len as usize) };
critical_section::with(|cs| {
let mut queue = super::BT_RECEIVE_QUEUE.borrow_ref_mut(cs);
super::BT_STATE.with(|state| {
let mut data = [0u8; 256];
data[0] = 0x02; // ACL
data[1..][..data_slice.len()].copy_from_slice(data_slice);
queue.push_back(ReceivedPacket {
state.rx_queue.push_back(ReceivedPacket {
data: Box::from(&data[..data_slice.len() + 1]),
});
@ -1419,7 +1418,7 @@ pub fn send_hci(data: &[u8]) {
dump_packet_info(packet);
critical_section::with(|_cs| {
super::BT_STATE.with(|_state| {
if packet[0] == DATA_TYPE_COMMAND {
let cmd = r_ble_hci_trans_buf_alloc(BLE_HCI_TRANS_BUF_CMD);
core::ptr::copy_nonoverlapping(

View File

@ -1,5 +1,3 @@
use portable_atomic::{AtomicU32, Ordering};
use crate::{
binary::include::*,
hal::{peripherals::LPWR, ram},
@ -11,7 +9,6 @@ static mut SOC_PHY_DIG_REGS_MEM: [u8; SOC_PHY_DIG_REGS_MEM_SIZE] = [0u8; SOC_PHY
static mut G_IS_PHY_CALIBRATED: bool = false;
static mut G_PHY_DIGITAL_REGS_MEM: *mut u32 = core::ptr::null_mut();
static mut S_IS_PHY_REG_STORED: bool = false;
static PHY_ACCESS_REF: AtomicU32 = AtomicU32::new(0);
pub(crate) fn enable_wifi_power_domain() {
LPWR::regs()
@ -33,63 +30,53 @@ pub(crate) unsafe fn bbpll_en_usb() {
// nothing for ESP32
}
pub(crate) unsafe fn phy_enable() {
let count = PHY_ACCESS_REF.fetch_add(1, Ordering::SeqCst);
if count == 0 {
critical_section::with(|_| {
// #if CONFIG_IDF_TARGET_ESP32
// // Update time stamp
// s_phy_rf_en_ts = esp_timer_get_time();
// // Update WiFi MAC time before WiFi/BT common clock is enabled
// phy_update_wifi_mac_time(false, s_phy_rf_en_ts);
// #endif
pub(super) unsafe fn phy_enable_inner() {
// #if CONFIG_IDF_TARGET_ESP32
// // Update time stamp
// s_phy_rf_en_ts = esp_timer_get_time();
// // Update WiFi MAC time before WiFi/BT common clock is enabled
// phy_update_wifi_mac_time(false, s_phy_rf_en_ts);
// #endif
unsafe {
super::phy_enable_clock();
}
if unsafe { !G_IS_PHY_CALIBRATED } {
super::phy_calibrate();
unsafe { G_IS_PHY_CALIBRATED = true };
} else {
unsafe {
phy_wakeup_init();
}
phy_digital_regs_load();
}
#[cfg(coex)]
unsafe {
coex_bt_high_prio();
}
trace!("PHY ENABLE");
});
unsafe {
super::phy_enable_clock();
}
if unsafe { !G_IS_PHY_CALIBRATED } {
super::phy_calibrate();
unsafe { G_IS_PHY_CALIBRATED = true };
} else {
unsafe {
phy_wakeup_init();
}
phy_digital_regs_load();
}
#[cfg(coex)]
unsafe {
coex_bt_high_prio();
}
trace!("PHY ENABLE");
}
#[allow(unused)]
pub(crate) unsafe fn phy_disable() {
let count = PHY_ACCESS_REF.fetch_sub(1, Ordering::SeqCst);
if count == 1 {
critical_section::with(|_| {
phy_digital_regs_store();
unsafe {
// Disable PHY and RF.
phy_close_rf();
pub(super) unsafe fn phy_disable_inner() {
phy_digital_regs_store();
unsafe {
// Disable PHY and RF.
phy_close_rf();
// #if CONFIG_IDF_TARGET_ESP32
// // Update WiFi MAC time before disalbe WiFi/BT common peripheral
// clock phy_update_wifi_mac_time(true,
// esp_timer_get_time()); #endif
// #if CONFIG_IDF_TARGET_ESP32
// // Update WiFi MAC time before disalbe WiFi/BT common peripheral
// clock phy_update_wifi_mac_time(true,
// esp_timer_get_time()); #endif
// Disable WiFi/BT common peripheral clock. Do not disable clock for hardware
// RNG
super::phy_disable_clock();
}
trace!("PHY DISABLE");
});
// Disable WiFi/BT common peripheral clock. Do not disable clock for hardware
// RNG
super::phy_disable_clock();
}
trace!("PHY DISABLE");
}
fn phy_digital_regs_load() {
@ -149,7 +136,7 @@ unsafe extern "C" fn __esp_radio_esp_dport_access_reg_read(reg: u32) -> u32 {
unsafe extern "C" fn __esp_radio_phy_enter_critical() -> u32 {
trace!("phy_enter_critical");
unsafe { core::mem::transmute(critical_section::acquire()) }
unsafe { crate::ESP_RADIO_LOCK.acquire().inner() }
}
/// **************************************************************************
@ -171,9 +158,8 @@ unsafe extern "C" fn __esp_radio_phy_exit_critical(level: u32) {
trace!("phy_exit_critical {}", level);
unsafe {
critical_section::release(core::mem::transmute::<u32, critical_section::RestoreState>(
level,
));
let token = esp_sync::RestoreState::new(level);
crate::ESP_RADIO_LOCK.release(token);
}
}

View File

@ -1,5 +1,3 @@
use portable_atomic::{AtomicU32, Ordering};
use crate::binary::include::*;
const SOC_PHY_DIG_REGS_MEM_SIZE: usize = 21 * 4;
@ -8,7 +6,6 @@ static mut SOC_PHY_DIG_REGS_MEM: [u8; SOC_PHY_DIG_REGS_MEM_SIZE] = [0u8; SOC_PHY
static mut G_IS_PHY_CALIBRATED: bool = false;
static mut G_PHY_DIGITAL_REGS_MEM: *mut u32 = core::ptr::null_mut();
static mut S_IS_PHY_REG_STORED: bool = false;
static PHY_ACCESS_REF: AtomicU32 = AtomicU32::new(0);
pub(crate) fn enable_wifi_power_domain() {
// In esp-idf, neither SOC_PM_SUPPORT_MODEM_PD or SOC_PM_SUPPORT_WIFI_PD are
@ -25,64 +22,54 @@ pub(crate) unsafe fn bbpll_en_usb() {
// nothing for ESP32-C2
}
pub(crate) unsafe fn phy_enable() {
let count = PHY_ACCESS_REF.fetch_add(1, Ordering::SeqCst);
if count == 0 {
critical_section::with(|_| {
unsafe {
super::phy_enable_clock();
}
if unsafe { !G_IS_PHY_CALIBRATED } {
super::phy_calibrate();
unsafe { G_IS_PHY_CALIBRATED = true };
} else {
unsafe {
phy_wakeup_init();
}
phy_digital_regs_load();
}
#[cfg(feature = "ble")]
{
unsafe extern "C" {
fn coex_pti_v2();
}
unsafe {
coex_pti_v2();
}
}
trace!("PHY ENABLE");
});
pub(super) unsafe fn phy_enable_inner() {
unsafe {
super::phy_enable_clock();
}
if unsafe { !G_IS_PHY_CALIBRATED } {
super::phy_calibrate();
unsafe { G_IS_PHY_CALIBRATED = true };
} else {
unsafe {
phy_wakeup_init();
}
phy_digital_regs_load();
}
#[cfg(feature = "ble")]
{
unsafe extern "C" {
fn coex_pti_v2();
}
unsafe {
coex_pti_v2();
}
}
trace!("PHY ENABLE");
}
#[allow(unused)]
pub(crate) unsafe fn phy_disable() {
let count = PHY_ACCESS_REF.fetch_sub(1, Ordering::SeqCst);
if count == 1 {
critical_section::with(|_| {
phy_digital_regs_store();
unsafe {
// Disable PHY and RF.
phy_close_rf();
pub(super) unsafe fn phy_disable_inner() {
phy_digital_regs_store();
unsafe {
// Disable PHY and RF.
phy_close_rf();
// Disable PHY temperature sensor
phy_xpd_tsens();
// Disable PHY temperature sensor
phy_xpd_tsens();
// #if CONFIG_IDF_TARGET_ESP32
// // Update WiFi MAC time before disalbe WiFi/BT common peripheral
// clock phy_update_wifi_mac_time(true,
// esp_timer_get_time()); #endif
// #if CONFIG_IDF_TARGET_ESP32
// // Update WiFi MAC time before disalbe WiFi/BT common peripheral
// clock phy_update_wifi_mac_time(true,
// esp_timer_get_time()); #endif
// Disable WiFi/BT common peripheral clock. Do not disable clock for hardware
// RNG
super::phy_disable_clock();
}
trace!("PHY DISABLE");
});
// Disable WiFi/BT common peripheral clock. Do not disable clock for hardware
// RNG
super::phy_disable_clock();
}
trace!("PHY DISABLE");
}
fn phy_digital_regs_load() {

View File

@ -1,5 +1,3 @@
use portable_atomic::{AtomicU32, Ordering};
use crate::{
binary::include::*,
hal::peripherals::{APB_CTRL, LPWR},
@ -11,7 +9,6 @@ static mut SOC_PHY_DIG_REGS_MEM: [u8; SOC_PHY_DIG_REGS_MEM_SIZE] = [0u8; SOC_PHY
static mut G_IS_PHY_CALIBRATED: bool = false;
static mut G_PHY_DIGITAL_REGS_MEM: *mut u32 = core::ptr::null_mut();
static mut S_IS_PHY_REG_STORED: bool = false;
static PHY_ACCESS_REF: AtomicU32 = AtomicU32::new(0);
pub(crate) fn enable_wifi_power_domain() {
const SYSTEM_WIFIBB_RST: u32 = 1 << 0;
@ -68,64 +65,54 @@ pub(crate) unsafe fn bbpll_en_usb() {
}
}
pub(crate) unsafe fn phy_enable() {
let count = PHY_ACCESS_REF.fetch_add(1, Ordering::SeqCst);
if count == 0 {
critical_section::with(|_| {
unsafe {
super::phy_enable_clock();
}
if unsafe { !G_IS_PHY_CALIBRATED } {
super::phy_calibrate();
unsafe { G_IS_PHY_CALIBRATED = true };
} else {
unsafe {
phy_wakeup_init();
}
phy_digital_regs_load();
}
#[cfg(feature = "ble")]
{
unsafe extern "C" {
fn coex_pti_v2();
}
unsafe {
coex_pti_v2();
}
}
trace!("PHY ENABLE");
});
pub(super) unsafe fn phy_enable_inner() {
unsafe {
super::phy_enable_clock();
}
if unsafe { !G_IS_PHY_CALIBRATED } {
super::phy_calibrate();
unsafe { G_IS_PHY_CALIBRATED = true };
} else {
unsafe {
phy_wakeup_init();
}
phy_digital_regs_load();
}
#[cfg(feature = "ble")]
{
unsafe extern "C" {
fn coex_pti_v2();
}
unsafe {
coex_pti_v2();
}
}
trace!("PHY ENABLE");
}
#[allow(unused)]
pub(crate) unsafe fn phy_disable() {
let count = PHY_ACCESS_REF.fetch_sub(1, Ordering::SeqCst);
if count == 1 {
critical_section::with(|_| {
phy_digital_regs_store();
unsafe {
// Disable PHY and RF.
phy_close_rf();
pub(super) unsafe fn phy_disable_inner() {
phy_digital_regs_store();
unsafe {
// Disable PHY and RF.
phy_close_rf();
// Disable PHY temperature sensor
phy_xpd_tsens();
// Disable PHY temperature sensor
phy_xpd_tsens();
// #if CONFIG_IDF_TARGET_ESP32
// // Update WiFi MAC time before disalbe WiFi/BT common peripheral
// clock phy_update_wifi_mac_time(true,
// esp_timer_get_time()); #endif
// #if CONFIG_IDF_TARGET_ESP32
// // Update WiFi MAC time before disalbe WiFi/BT common peripheral
// clock phy_update_wifi_mac_time(true,
// esp_timer_get_time()); #endif
// Disable WiFi/BT common peripheral clock. Do not disable clock for hardware
// RNG
super::phy_disable_clock();
}
trace!("PHY DISABLE");
});
// Disable WiFi/BT common peripheral clock. Do not disable clock for hardware
// RNG
super::phy_disable_clock();
}
trace!("PHY DISABLE");
}
fn phy_digital_regs_load() {

View File

@ -1,5 +1,3 @@
use portable_atomic::{AtomicU32, Ordering};
use crate::binary::include::*;
const SOC_PHY_DIG_REGS_MEM_SIZE: usize = 21 * 4;
@ -8,7 +6,6 @@ static mut SOC_PHY_DIG_REGS_MEM: [u8; SOC_PHY_DIG_REGS_MEM_SIZE] = [0u8; SOC_PHY
static mut G_IS_PHY_CALIBRATED: bool = false;
static mut G_PHY_DIGITAL_REGS_MEM: *mut u32 = core::ptr::null_mut();
static mut S_IS_PHY_REG_STORED: bool = false;
static PHY_ACCESS_REF: AtomicU32 = AtomicU32::new(0);
pub(crate) fn enable_wifi_power_domain() {
// In esp-idf, SOC_PMU_SUPPORTED is set which makes
@ -34,64 +31,54 @@ pub(crate) unsafe fn bbpll_en_usb() {
}
}
pub(crate) unsafe fn phy_enable() {
let count = PHY_ACCESS_REF.fetch_add(1, Ordering::SeqCst);
if count == 0 {
critical_section::with(|_| {
unsafe {
super::phy_enable_clock();
}
if unsafe { !G_IS_PHY_CALIBRATED } {
super::phy_calibrate();
unsafe { G_IS_PHY_CALIBRATED = true };
} else {
unsafe {
phy_wakeup_init();
}
phy_digital_regs_load();
}
#[cfg(feature = "ble")]
{
unsafe extern "C" {
fn coex_pti_v2();
}
unsafe {
coex_pti_v2();
}
}
trace!("PHY ENABLE");
});
pub(super) unsafe fn phy_enable_inner() {
unsafe {
super::phy_enable_clock();
}
if unsafe { !G_IS_PHY_CALIBRATED } {
super::phy_calibrate();
unsafe { G_IS_PHY_CALIBRATED = true };
} else {
unsafe {
phy_wakeup_init();
}
phy_digital_regs_load();
}
#[cfg(feature = "ble")]
{
unsafe extern "C" {
fn coex_pti_v2();
}
unsafe {
coex_pti_v2();
}
}
trace!("PHY ENABLE");
}
#[allow(unused)]
pub(crate) unsafe fn phy_disable() {
let count = PHY_ACCESS_REF.fetch_sub(1, Ordering::SeqCst);
if count == 1 {
critical_section::with(|_| {
phy_digital_regs_store();
unsafe {
// Disable PHY and RF.
phy_close_rf();
pub(super) unsafe fn phy_disable_inner() {
phy_digital_regs_store();
unsafe {
// Disable PHY and RF.
phy_close_rf();
// Disable PHY temperature sensor
phy_xpd_tsens();
// Disable PHY temperature sensor
phy_xpd_tsens();
// #if CONFIG_IDF_TARGET_ESP32
// // Update WiFi MAC time before disable WiFi/BT common peripheral
// clock phy_update_wifi_mac_time(true,
// esp_timer_get_time()); #endif
// #if CONFIG_IDF_TARGET_ESP32
// // Update WiFi MAC time before disable WiFi/BT common peripheral
// clock phy_update_wifi_mac_time(true,
// esp_timer_get_time()); #endif
// Disable WiFi/BT common peripheral clock. Do not disable clock for hardware
// RNG
super::phy_disable_clock();
}
trace!("PHY DISABLE");
});
// Disable WiFi/BT common peripheral clock. Do not disable clock for hardware
// RNG
super::phy_disable_clock();
}
trace!("PHY DISABLE");
}
fn phy_digital_regs_load() {

View File

@ -1,6 +1,3 @@
// use atomic_polyfill::AtomicU32;
use portable_atomic::{AtomicU32, Ordering};
use crate::binary::include::*;
const SOC_PHY_DIG_REGS_MEM_SIZE: usize = 21 * 4;
@ -9,7 +6,6 @@ static mut SOC_PHY_DIG_REGS_MEM: [u8; SOC_PHY_DIG_REGS_MEM_SIZE] = [0u8; SOC_PHY
static mut G_IS_PHY_CALIBRATED: bool = false;
static mut G_PHY_DIGITAL_REGS_MEM: *mut u32 = core::ptr::null_mut();
static mut S_IS_PHY_REG_STORED: bool = false;
static PHY_ACCESS_REF: AtomicU32 = AtomicU32::new(0);
pub(crate) fn enable_wifi_power_domain() {
// In esp-idf, SOC_PMU_SUPPORTED is set which makes
@ -26,64 +22,54 @@ pub(crate) unsafe fn bbpll_en_usb() {
// nothing for ESP32-H2
}
pub(crate) unsafe fn phy_enable() {
let count = PHY_ACCESS_REF.fetch_add(1, Ordering::SeqCst);
if count == 0 {
critical_section::with(|_| {
unsafe {
super::phy_enable_clock();
}
if unsafe { !G_IS_PHY_CALIBRATED } {
super::phy_calibrate();
unsafe { G_IS_PHY_CALIBRATED = true };
} else {
unsafe {
phy_wakeup_init();
}
phy_digital_regs_load();
}
#[cfg(feature = "ble")]
{
unsafe extern "C" {
fn coex_pti_v2();
}
unsafe {
coex_pti_v2();
}
}
trace!("PHY ENABLE");
});
pub(super) unsafe fn phy_enable_inner() {
unsafe {
super::phy_enable_clock();
}
if unsafe { !G_IS_PHY_CALIBRATED } {
super::phy_calibrate();
unsafe { G_IS_PHY_CALIBRATED = true };
} else {
unsafe {
phy_wakeup_init();
}
phy_digital_regs_load();
}
#[cfg(feature = "ble")]
{
unsafe extern "C" {
fn coex_pti_v2();
}
unsafe {
coex_pti_v2();
}
}
trace!("PHY ENABLE");
}
#[allow(unused)]
pub(crate) unsafe fn phy_disable() {
let count = PHY_ACCESS_REF.fetch_sub(1, Ordering::SeqCst);
if count == 1 {
critical_section::with(|_| {
phy_digital_regs_store();
unsafe {
// Disable PHY and RF.
phy_close_rf();
pub(super) unsafe fn phy_disable_inner() {
phy_digital_regs_store();
unsafe {
// Disable PHY and RF.
phy_close_rf();
// Disable PHY temperature sensor
phy_xpd_tsens();
// Disable PHY temperature sensor
phy_xpd_tsens();
// #if CONFIG_IDF_TARGET_ESP32
// // Update WiFi MAC time before disalbe WiFi/BT common peripheral
// clock phy_update_wifi_mac_time(true,
// esp_timer_get_time()); #endif
// #if CONFIG_IDF_TARGET_ESP32
// // Update WiFi MAC time before disalbe WiFi/BT common peripheral
// clock phy_update_wifi_mac_time(true,
// esp_timer_get_time()); #endif
// Disable WiFi/BT common peripheral clock. Do not disable clock for hardware
// RNG
super::phy_disable_clock();
}
trace!("PHY DISABLE");
});
// Disable WiFi/BT common peripheral clock. Do not disable clock for hardware
// RNG
super::phy_disable_clock();
}
trace!("PHY DISABLE");
}
fn phy_digital_regs_load() {

View File

@ -1,5 +1,3 @@
use portable_atomic::{AtomicU32, Ordering};
use crate::{
binary::include::*,
hal::{
@ -14,7 +12,6 @@ static mut SOC_PHY_DIG_REGS_MEM: [u8; SOC_PHY_DIG_REGS_MEM_SIZE] = [0u8; SOC_PHY
static mut G_IS_PHY_CALIBRATED: bool = false;
static mut G_PHY_DIGITAL_REGS_MEM: *mut u32 = core::ptr::null_mut();
static mut S_IS_PHY_REG_STORED: bool = false;
static PHY_ACCESS_REF: AtomicU32 = AtomicU32::new(0);
pub(crate) fn enable_wifi_power_domain() {
const DPORT_WIFIBB_RST: u32 = 1 << 0;
@ -58,55 +55,45 @@ pub(crate) unsafe fn bbpll_en_usb() {
// nothing for ESP32-S2
}
pub(crate) unsafe fn phy_enable() {
let count = PHY_ACCESS_REF.fetch_add(1, Ordering::SeqCst);
if count == 0 {
critical_section::with(|_| {
unsafe {
super::phy_enable_clock();
}
pub(super) unsafe fn phy_enable_inner() {
unsafe {
super::phy_enable_clock();
}
if unsafe { !G_IS_PHY_CALIBRATED } {
super::phy_calibrate();
unsafe { G_IS_PHY_CALIBRATED = true };
} else {
unsafe {
phy_wakeup_init();
}
phy_digital_regs_load();
}
if unsafe { !G_IS_PHY_CALIBRATED } {
super::phy_calibrate();
unsafe { G_IS_PHY_CALIBRATED = true };
} else {
unsafe {
phy_wakeup_init();
}
phy_digital_regs_load();
}
#[cfg(feature = "ble")]
{
extern "C" {
fn coex_pti_v2();
}
unsafe {
coex_pti_v2();
}
}
});
#[cfg(feature = "ble")]
{
extern "C" {
fn coex_pti_v2();
}
unsafe {
coex_pti_v2();
}
}
}
#[allow(unused)]
pub(crate) unsafe fn phy_disable() {
let count = PHY_ACCESS_REF.fetch_sub(1, Ordering::SeqCst);
if count == 1 {
critical_section::with(|_| {
phy_digital_regs_store();
unsafe {
// Disable PHY and RF.
phy_close_rf();
pub(super) unsafe fn phy_disable_inner() {
phy_digital_regs_store();
unsafe {
// Disable PHY and RF.
phy_close_rf();
// Disable PHY temperature sensor
phy_xpd_tsens();
// Disable PHY temperature sensor
phy_xpd_tsens();
// Disable WiFi/BT common peripheral clock. Do not disable clock for hardware
// RNG
super::phy_disable_clock();
}
});
// Disable WiFi/BT common peripheral clock. Do not disable clock for hardware
// RNG
super::phy_disable_clock();
}
}
@ -145,7 +132,7 @@ fn phy_digital_regs_store() {
unsafe extern "C" fn __esp_radio_phy_enter_critical() -> u32 {
trace!("phy_enter_critical");
unsafe { core::mem::transmute(critical_section::acquire()) }
unsafe { crate::ESP_RADIO_LOCK.acquire().inner() }
}
/// **************************************************************************
@ -167,9 +154,8 @@ unsafe extern "C" fn __esp_radio_phy_exit_critical(level: u32) {
trace!("phy_exit_critical {}", level);
unsafe {
critical_section::release(core::mem::transmute::<u32, critical_section::RestoreState>(
level,
));
let token = esp_sync::RestoreState::new(level);
crate::ESP_RADIO_LOCK.release(token);
}
}

View File

@ -1,5 +1,3 @@
use portable_atomic::{AtomicU32, Ordering};
use crate::binary::include::*;
const SOC_PHY_DIG_REGS_MEM_SIZE: usize = 21 * 4;
@ -8,7 +6,6 @@ static mut SOC_PHY_DIG_REGS_MEM: [u8; SOC_PHY_DIG_REGS_MEM_SIZE] = [0u8; SOC_PHY
static mut G_IS_PHY_CALIBRATED: bool = false;
static mut G_PHY_DIGITAL_REGS_MEM: *mut u32 = core::ptr::null_mut();
static mut S_IS_PHY_REG_STORED: bool = false;
static PHY_ACCESS_REF: AtomicU32 = AtomicU32::new(0);
pub(crate) fn phy_mem_init() {
unsafe {
@ -69,60 +66,49 @@ pub(crate) unsafe fn bbpll_en_usb() {
}
}
pub(crate) unsafe fn phy_enable() {
let count = PHY_ACCESS_REF.fetch_add(1, Ordering::SeqCst);
if count == 0 {
critical_section::with(|_| {
unsafe {
super::phy_enable_clock();
}
if unsafe { !G_IS_PHY_CALIBRATED } {
super::phy_calibrate();
unsafe { G_IS_PHY_CALIBRATED = true };
} else {
unsafe {
phy_wakeup_init();
}
phy_digital_regs_load();
}
#[cfg(feature = "ble")]
{
unsafe extern "C" {
fn coex_pti_v2();
}
unsafe {
coex_pti_v2();
}
}
trace!("PHY ENABLE");
});
pub(super) unsafe fn phy_enable_inner() {
unsafe {
super::phy_enable_clock();
}
if unsafe { !G_IS_PHY_CALIBRATED } {
super::phy_calibrate();
unsafe { G_IS_PHY_CALIBRATED = true };
} else {
unsafe {
phy_wakeup_init();
}
phy_digital_regs_load();
}
#[cfg(feature = "ble")]
{
unsafe extern "C" {
fn coex_pti_v2();
}
unsafe {
coex_pti_v2();
}
}
trace!("PHY ENABLE");
}
#[allow(unused)]
pub(crate) unsafe fn phy_disable() {
let count = PHY_ACCESS_REF.fetch_sub(1, Ordering::SeqCst);
if count == 1 {
critical_section::with(|_| {
phy_digital_regs_store();
unsafe {
// Disable PHY and RF.
phy_close_rf();
pub(super) unsafe fn phy_disable_inner() {
phy_digital_regs_store();
unsafe {
// Disable PHY and RF.
phy_close_rf();
// Disable PHY temperature sensor
phy_xpd_tsens();
// Disable PHY temperature sensor
phy_xpd_tsens();
// Disable WiFi/BT common peripheral clock. Do not disable clock for hardware
// RNG
super::phy_disable_clock();
}
trace!("PHY DISABLE");
});
// Disable WiFi/BT common peripheral clock. Do not disable clock for hardware
// RNG
super::phy_disable_clock();
}
trace!("PHY DISABLE");
}
fn phy_digital_regs_load() {

View File

@ -1,3 +1,4 @@
use esp_sync::NonReentrantMutex;
use esp_wifi_sys::{
c_types::c_char,
include::{
@ -34,8 +35,30 @@ pub(crate) mod chip_specific;
#[cfg_attr(esp32s2, path = "phy_init_data_esp32s2.rs")]
pub(crate) mod phy_init_data;
static CAL_DATA: esp_hal::sync::Locked<[u8; core::mem::size_of::<esp_phy_calibration_data_t>()]> =
esp_hal::sync::Locked::new([0u8; core::mem::size_of::<esp_phy_calibration_data_t>()]);
static PHY_ACCESS_REF: NonReentrantMutex<usize> = NonReentrantMutex::new(0);
pub(crate) unsafe fn phy_enable() {
PHY_ACCESS_REF.with(|ref_count| {
*ref_count += 1;
if *ref_count == 1 {
unsafe { chip_specific::phy_enable_inner() };
}
})
}
#[allow(unused)]
pub(crate) unsafe fn phy_disable() {
PHY_ACCESS_REF.with(|ref_count| {
*ref_count -= 1;
if *ref_count == 0 {
unsafe { chip_specific::phy_disable_inner() };
}
})
}
static CAL_DATA: esp_sync::NonReentrantMutex<
[u8; core::mem::size_of::<esp_phy_calibration_data_t>()],
> = esp_sync::NonReentrantMutex::new([0u8; core::mem::size_of::<esp_phy_calibration_data_t>()]);
/// **************************************************************************
/// Name: esp_semphr_create

View File

@ -9,13 +9,14 @@ use core::{
};
use esp_hal::time::{Duration, Instant};
use esp_sync::NonReentrantMutex;
use esp_wifi_sys::{c_types::c_char, include::malloc};
use super::malloc::free;
use crate::{
CONFIG,
ESP_RADIO_LOCK,
binary::c_types::{c_int, c_void},
hal::sync::Locked,
memory_fence::memory_fence,
preempt::{current_task, yield_task},
};
@ -30,13 +31,13 @@ struct Mutex {
}
pub(crate) struct ConcurrentQueue {
raw_queue: Locked<RawQueue>,
raw_queue: NonReentrantMutex<RawQueue>,
}
impl ConcurrentQueue {
pub(crate) fn new(count: usize, item_size: usize) -> Self {
Self {
raw_queue: Locked::new(RawQueue::new(count, item_size)),
raw_queue: NonReentrantMutex::new(RawQueue::new(count, item_size)),
}
}
@ -215,8 +216,7 @@ pub(crate) fn sem_take(semphr: *mut c_void, tick: u32) -> i32 {
let sem = semphr as *mut u32;
'outer: loop {
let res = critical_section::with(|_| unsafe {
memory_fence();
let res = ESP_RADIO_LOCK.lock(|| unsafe {
let cnt = *sem;
if cnt > 0 {
*sem = cnt - 1;
@ -246,7 +246,7 @@ pub(crate) fn sem_give(semphr: *mut c_void) -> i32 {
trace!("semphr_give {:?}", semphr);
let sem = semphr as *mut u32;
critical_section::with(|_| unsafe {
ESP_RADIO_LOCK.lock(|| unsafe {
let cnt = *sem;
*sem = cnt + 1;
1
@ -290,7 +290,7 @@ pub(crate) fn lock_mutex(mutex: *mut c_void) -> i32 {
let current_task = current_task() as usize;
loop {
let mutex_locked = critical_section::with(|_| unsafe {
let mutex_locked = ESP_RADIO_LOCK.lock(|| unsafe {
if (*ptr).count == 0 {
(*ptr).locking_pid = current_task;
(*ptr).count += 1;
@ -302,7 +302,6 @@ pub(crate) fn lock_mutex(mutex: *mut c_void) -> i32 {
false
}
});
memory_fence();
if mutex_locked {
return 1;
@ -315,11 +314,10 @@ pub(crate) fn lock_mutex(mutex: *mut c_void) -> i32 {
pub(crate) fn unlock_mutex(mutex: *mut c_void) -> i32 {
trace!("mutex_unlock {:?}", mutex);
let ptr = mutex as *mut Mutex;
critical_section::with(|_| unsafe {
memory_fence();
if (*ptr).count > 0 {
(*ptr).count -= 1;
let mutex = unsafe { &mut *(mutex as *mut Mutex) };
ESP_RADIO_LOCK.lock(|| unsafe {
if mutex.count > 0 {
mutex.count -= 1;
1
} else {
0

View File

@ -1,6 +1,6 @@
use alloc::boxed::Box;
use esp_hal::sync::Locked;
use esp_sync::NonReentrantMutex;
use crate::binary::{
c_types,
@ -147,7 +147,7 @@ impl TimerQueue {
unsafe impl Send for TimerQueue {}
pub(crate) static TIMERS: Locked<TimerQueue> = Locked::new(TimerQueue::new());
pub(crate) static TIMERS: NonReentrantMutex<TimerQueue> = NonReentrantMutex::new(TimerQueue::new());
#[cfg(any(feature = "wifi", all(feature = "ble", npl)))]
pub(crate) fn compat_timer_arm(ets_timer: *mut ets_timer, tmout: u32, repeat: bool) {

View File

@ -11,14 +11,13 @@
use alloc::{boxed::Box, collections::vec_deque::VecDeque};
use core::{
cell::RefCell,
fmt::Debug,
marker::PhantomData,
task::{Context, Poll},
};
use critical_section::Mutex;
use esp_hal::asynch::AtomicWaker;
use esp_sync::NonReentrantMutex;
use portable_atomic::{AtomicBool, AtomicU8, Ordering};
use super::*;
@ -37,9 +36,14 @@ pub const ESP_NOW_MAX_DATA_LEN: usize = 250;
/// Broadcast address
pub const BROADCAST_ADDRESS: [u8; 6] = [0xffu8, 0xffu8, 0xffu8, 0xffu8, 0xffu8, 0xffu8];
// Stores received packets until dequeued by the user
static RECEIVE_QUEUE: Mutex<RefCell<VecDeque<ReceivedData>>> =
Mutex::new(RefCell::new(VecDeque::new()));
struct EspNowState {
// Stores received packets until dequeued by the user
rx_queue: VecDeque<ReceivedData>,
}
static STATE: NonReentrantMutex<EspNowState> = NonReentrantMutex::new(EspNowState {
rx_queue: VecDeque::new(),
});
/// This atomic behaves like a guard, so we need strict memory ordering when
/// operating it.
@ -50,6 +54,9 @@ static ESP_NOW_SEND_CB_INVOKED: AtomicBool = AtomicBool::new(false);
/// Status of esp now send, true for success, false for failure
static ESP_NOW_SEND_STATUS: AtomicBool = AtomicBool::new(true);
static ESP_NOW_TX_WAKER: AtomicWaker = AtomicWaker::new();
static ESP_NOW_RX_WAKER: AtomicWaker = AtomicWaker::new();
macro_rules! check_error {
($block:block) => {
match unsafe { $block } {
@ -632,10 +639,7 @@ impl EspNowReceiver<'_> {
/// Receives data from the ESP-NOW queue.
#[instability::unstable]
pub fn receive(&self) -> Option<ReceivedData> {
critical_section::with(|cs| {
let mut queue = RECEIVE_QUEUE.borrow_ref_mut(cs);
queue.pop_front()
})
STATE.with(|state| state.rx_queue.pop_front())
}
}
@ -842,14 +846,12 @@ impl<'d> EspNow<'d> {
}
unsafe extern "C" fn send_cb(_mac_addr: *const u8, status: esp_now_send_status_t) {
critical_section::with(|_| {
let is_success = status == esp_now_send_status_t_ESP_NOW_SEND_SUCCESS;
ESP_NOW_SEND_STATUS.store(is_success, Ordering::Relaxed);
let is_success = status == esp_now_send_status_t_ESP_NOW_SEND_SUCCESS;
ESP_NOW_SEND_STATUS.store(is_success, Ordering::Relaxed);
ESP_NOW_SEND_CB_INVOKED.store(true, Ordering::Release);
ESP_NOW_SEND_CB_INVOKED.store(true, Ordering::Release);
ESP_NOW_TX_WAKER.wake();
})
ESP_NOW_TX_WAKER.wake();
}
unsafe extern "C" fn rcv_cb(
@ -888,23 +890,19 @@ unsafe extern "C" fn rcv_cb(
rx_control,
};
let slice = unsafe { core::slice::from_raw_parts(data, data_len as usize) };
critical_section::with(|cs| {
let mut queue = RECEIVE_QUEUE.borrow_ref_mut(cs);
STATE.with(|state| {
let data = Box::from(slice);
if queue.len() >= RECEIVE_QUEUE_SIZE {
queue.pop_front();
if state.rx_queue.len() >= RECEIVE_QUEUE_SIZE {
state.rx_queue.pop_front();
}
queue.push_back(ReceivedData { data, info });
state.rx_queue.push_back(ReceivedData { data, info });
ESP_NOW_RX_WAKER.wake();
});
}
pub(super) static ESP_NOW_TX_WAKER: AtomicWaker = AtomicWaker::new();
pub(super) static ESP_NOW_RX_WAKER: AtomicWaker = AtomicWaker::new();
impl EspNowReceiver<'_> {
/// This function takes mutable reference to self because the
/// implementation of `ReceiveFuture` is not logically thread
@ -1004,10 +1002,7 @@ impl core::future::Future for ReceiveFuture<'_> {
fn poll(self: core::pin::Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
ESP_NOW_RX_WAKER.register(cx.waker());
if let Some(data) = critical_section::with(|cs| {
let mut queue = RECEIVE_QUEUE.borrow_ref_mut(cs);
queue.pop_front()
}) {
if let Some(data) = STATE.with(|state| state.rx_queue.pop_front()) {
Poll::Ready(data)
} else {
Poll::Pending

View File

@ -15,11 +15,9 @@
//! [IEEE 802.15.4]: https://en.wikipedia.org/wiki/IEEE_802.15.4
//! [esp-openthread]: https://github.com/esp-rs/esp-openthread
use core::cell::RefCell;
use byte::{BytesExt, TryRead};
use critical_section::Mutex;
use esp_hal::{clock::PhyClockGuard, peripherals::IEEE802154};
use esp_sync::NonReentrantMutex;
use ieee802154::mac::{self, FooterMode, FrameSerDesContext};
use self::{
@ -243,68 +241,48 @@ impl<'a> Ieee802154<'a> {
/// Set the transmit done callback function.
pub fn set_tx_done_callback(&mut self, callback: &'a mut (dyn FnMut() + Send)) {
critical_section::with(|cs| {
let mut tx_done_callback = TX_DONE_CALLBACK.borrow_ref_mut(cs);
CALLBACKS.with(|cbs| {
let cb: &'static mut (dyn FnMut() + Send) = unsafe { core::mem::transmute(callback) };
tx_done_callback.replace(cb);
cbs.tx_done = Some(cb);
});
}
/// Clear the transmit done callback function.
pub fn clear_tx_done_callback(&mut self) {
critical_section::with(|cs| {
let mut tx_done_callback = TX_DONE_CALLBACK.borrow_ref_mut(cs);
tx_done_callback.take();
});
CALLBACKS.with(|cbs| cbs.tx_done = None);
}
/// Set the receive available callback function.
pub fn set_rx_available_callback(&mut self, callback: &'a mut (dyn FnMut() + Send)) {
critical_section::with(|cs| {
let mut rx_available_callback = RX_AVAILABLE_CALLBACK.borrow_ref_mut(cs);
CALLBACKS.with(|cbs| {
let cb: &'static mut (dyn FnMut() + Send) = unsafe { core::mem::transmute(callback) };
rx_available_callback.replace(cb);
cbs.rx_available = Some(cb);
});
}
/// Clear the receive available callback function.
pub fn clear_rx_available_callback(&mut self) {
critical_section::with(|cs| {
let mut rx_available_callback = RX_AVAILABLE_CALLBACK.borrow_ref_mut(cs);
rx_available_callback.take();
});
CALLBACKS.with(|cbs| cbs.rx_available = None);
}
/// Set the transmit done callback function.
pub fn set_tx_done_callback_fn(&mut self, callback: fn()) {
critical_section::with(|cs| {
let mut tx_done_callback_fn = TX_DONE_CALLBACK_FN.borrow_ref_mut(cs);
tx_done_callback_fn.replace(callback);
});
CALLBACKS.with(|cbs| cbs.tx_done_fn = Some(callback));
}
/// Clear the transmit done callback function.
pub fn clear_tx_done_callback_fn(&mut self) {
critical_section::with(|cs| {
let mut tx_done_callback_fn = TX_DONE_CALLBACK_FN.borrow_ref_mut(cs);
tx_done_callback_fn.take();
});
CALLBACKS.with(|cbs| cbs.tx_done_fn = None);
}
/// Set the receive available callback function.
pub fn set_rx_available_callback_fn(&mut self, callback: fn()) {
critical_section::with(|cs| {
let mut rx_available_callback_fn = RX_AVAILABLE_CALLBACK_FN.borrow_ref_mut(cs);
rx_available_callback_fn.replace(callback);
});
CALLBACKS.with(|cbs| cbs.rx_available_fn = Some(callback));
}
/// Clear the receive available callback function.
pub fn clear_rx_available_callback_fn(&mut self) {
critical_section::with(|cs| {
let mut rx_available_callback_fn = RX_AVAILABLE_CALLBACK_FN.borrow_ref_mut(cs);
rx_available_callback_fn.take();
});
CALLBACKS.with(|cbs| cbs.rx_available_fn = None);
}
}
@ -333,54 +311,49 @@ pub fn rssi_to_lqi(rssi: i8) -> u8 {
}
}
static TX_DONE_CALLBACK: Mutex<RefCell<Option<&'static mut (dyn FnMut() + Send)>>> =
Mutex::new(RefCell::new(None));
/// User-registered callbacks invoked by the driver when the radio signals
/// transmit-done / receive-available events.
struct Callbacks {
    /// Closure run on transmit completion. Set via `set_tx_done_callback`,
    /// which transmutes the caller's `&'a mut` closure to `'static`.
    tx_done: Option<&'static mut (dyn FnMut() + Send)>,
    /// Closure run when a received frame is available.
    rx_available: Option<&'static mut (dyn FnMut() + Send)>,
    // TODO: remove these - Box<dyn FnMut> should be good enough
    /// Plain-fn variant of `tx_done`; both are invoked if both are set.
    tx_done_fn: Option<fn()>,
    /// Plain-fn variant of `rx_available`; both are invoked if both are set.
    rx_available_fn: Option<fn()>,
}
static RX_AVAILABLE_CALLBACK: Mutex<RefCell<Option<&'static mut (dyn FnMut() + Send)>>> =
Mutex::new(RefCell::new(None));
impl Callbacks {
fn call_tx_done(&mut self) {
if let Some(cb) = self.tx_done.as_mut() {
cb();
}
if let Some(cb) = self.tx_done_fn.as_mut() {
cb();
}
}
#[allow(clippy::type_complexity)]
static TX_DONE_CALLBACK_FN: Mutex<RefCell<Option<fn()>>> = Mutex::new(RefCell::new(None));
fn call_rx_available(&mut self) {
if let Some(cb) = self.rx_available.as_mut() {
cb();
}
if let Some(cb) = self.rx_available_fn.as_mut() {
cb();
}
}
}
#[allow(clippy::type_complexity)]
static RX_AVAILABLE_CALLBACK_FN: Mutex<RefCell<Option<fn()>>> = Mutex::new(RefCell::new(None));
/// Global callback registry, shared between the user-facing setters and the
/// `tx_done`/`rx_available` notification paths. `NonReentrantMutex` provides
/// the required interior mutability; all slots start empty.
static CALLBACKS: NonReentrantMutex<Callbacks> = NonReentrantMutex::new(Callbacks {
    tx_done: None,
    rx_available: None,
    tx_done_fn: None,
    rx_available_fn: None,
});
fn tx_done() {
trace!("tx_done callback");
critical_section::with(|cs| {
let mut tx_done_callback = TX_DONE_CALLBACK.borrow_ref_mut(cs);
let tx_done_callback = tx_done_callback.as_mut();
if let Some(tx_done_callback) = tx_done_callback {
tx_done_callback();
}
let mut tx_done_callback_fn = TX_DONE_CALLBACK_FN.borrow_ref_mut(cs);
let tx_done_callback_fn = tx_done_callback_fn.as_mut();
if let Some(tx_done_callback_fn) = tx_done_callback_fn {
tx_done_callback_fn();
}
});
CALLBACKS.with(|cbs| cbs.call_tx_done());
}
fn rx_available() {
trace!("rx available callback");
critical_section::with(|cs| {
let mut rx_available_callback = RX_AVAILABLE_CALLBACK.borrow_ref_mut(cs);
let rx_available_callback = rx_available_callback.as_mut();
if let Some(rx_available_callback) = rx_available_callback {
rx_available_callback();
}
let mut rx_available_callback_fn = RX_AVAILABLE_CALLBACK_FN.borrow_ref_mut(cs);
let rx_available_callback_fn = rx_available_callback_fn.as_mut();
if let Some(rx_available_callback_fn) = rx_available_callback_fn {
rx_available_callback_fn();
}
});
CALLBACKS.with(|cbs| cbs.call_rx_available());
}

View File

@ -1,6 +1,4 @@
use core::cell::RefCell;
use critical_section::Mutex;
use esp_sync::NonReentrantMutex;
use super::hal::{
set_cca_mode,
@ -25,8 +23,6 @@ pub(crate) const IEEE802154_FRAME_EXT_ADDR_SIZE: usize = 8;
const IEEE802154_MULTIPAN_0: u8 = 0;
const IEEE802154_MULTIPAN_MAX: usize = 4;
static PIB: Mutex<RefCell<Option<Pib>>> = Mutex::new(RefCell::new(None));
/// Frame pending mode
#[derive(Debug, Default, Clone, Copy, PartialEq, Eq)]
pub enum PendingMode {
@ -76,9 +72,27 @@ struct Pib {
cca_mode: CcaMode,
}
/// Protocol Information Base (PIB): MAC-layer configuration shared between
/// the `ieee802154_pib_set_*` setters and `ieee802154_pib_update`.
///
/// The values below are placeholders only; `ieee802154_pib_init` overwrites
/// every field with the real power-on defaults before the radio is used.
static PIB: NonReentrantMutex<Pib> = NonReentrantMutex::new(Pib {
    auto_ack_tx: false,
    auto_ack_rx: false,
    enhance_ack_tx: false,
    coordinator: false,
    promiscuous: false,
    rx_when_idle: false,
    txpower: 0,
    channel: 0,
    pending_mode: PendingMode::Disable,
    multipan_mask: 0,
    // Use the named constant (== 4) instead of a literal, for consistency
    // with the other per-PAN arrays below.
    panid: [0u16; IEEE802154_MULTIPAN_MAX],
    short_addr: [0u16; IEEE802154_MULTIPAN_MAX],
    ext_addr: [[0; IEEE802154_FRAME_EXT_ADDR_SIZE]; IEEE802154_MULTIPAN_MAX],
    cca_threshold: 0,
    cca_mode: CcaMode::Carrier,
});
pub(crate) fn ieee802154_pib_init() {
critical_section::with(|cs| {
PIB.borrow_ref_mut(cs).replace(Pib {
PIB.with(|pib| {
*pib = Pib {
auto_ack_tx: true,
auto_ack_rx: true,
enhance_ack_tx: true,
@ -94,106 +108,75 @@ pub(crate) fn ieee802154_pib_init() {
ext_addr: [[0xffu8; IEEE802154_FRAME_EXT_ADDR_SIZE]; IEEE802154_MULTIPAN_MAX],
cca_threshold: CONFIG_IEEE802154_CCA_THRESHOLD,
cca_mode: CcaMode::Ed,
});
}
});
}
pub(crate) fn ieee802154_pib_set_panid(index: u8, panid: u16) {
critical_section::with(|cs| {
PIB.borrow_ref_mut(cs).as_mut().unwrap().panid[index as usize] = panid;
});
PIB.with(|pib| pib.panid[index as usize] = panid)
}
pub(crate) fn ieee802154_pib_set_promiscuous(enable: bool) {
critical_section::with(|cs| {
PIB.borrow_ref_mut(cs).as_mut().unwrap().promiscuous = enable;
});
PIB.with(|pib| pib.promiscuous = enable)
}
pub(crate) fn ieee802154_pib_set_auto_ack_tx(enable: bool) {
critical_section::with(|cs| {
PIB.borrow_ref_mut(cs).as_mut().unwrap().auto_ack_tx = enable;
});
PIB.with(|pib| pib.auto_ack_tx = enable)
}
pub(crate) fn ieee802154_pib_set_auto_ack_rx(enable: bool) {
critical_section::with(|cs| {
PIB.borrow_ref_mut(cs).as_mut().unwrap().auto_ack_rx = enable;
});
PIB.with(|pib| pib.auto_ack_rx = enable)
}
pub(crate) fn ieee802154_pib_set_enhance_ack_tx(enable: bool) {
critical_section::with(|cs| {
PIB.borrow_ref_mut(cs).as_mut().unwrap().enhance_ack_tx = enable;
});
PIB.with(|pib| pib.enhance_ack_tx = enable)
}
pub(crate) fn ieee802154_pib_set_coordinator(enable: bool) {
critical_section::with(|cs| {
PIB.borrow_ref_mut(cs).as_mut().unwrap().coordinator = enable;
});
PIB.with(|pib| pib.coordinator = enable)
}
pub(crate) fn ieee802154_pib_set_rx_when_idle(enable: bool) {
critical_section::with(|cs| {
PIB.borrow_ref_mut(cs).as_mut().unwrap().rx_when_idle = enable;
});
PIB.with(|pib| pib.rx_when_idle = enable)
}
pub(crate) fn ieee802154_pib_get_rx_when_idle() -> bool {
critical_section::with(|cs| PIB.borrow_ref_mut(cs).as_mut().unwrap().rx_when_idle)
PIB.with(|pib| pib.rx_when_idle)
}
pub(crate) fn ieee802154_pib_set_tx_power(power: i8) {
critical_section::with(|cs| {
PIB.borrow_ref_mut(cs).as_mut().unwrap().txpower = power;
});
PIB.with(|pib| pib.txpower = power)
}
pub(crate) fn ieee802154_pib_set_channel(channel: u8) {
critical_section::with(|cs| {
PIB.borrow_ref_mut(cs).as_mut().unwrap().channel = channel;
});
PIB.with(|pib| pib.channel = channel)
}
pub(crate) fn ieee802154_pib_set_pending_mode(mode: PendingMode) {
critical_section::with(|cs| {
PIB.borrow_ref_mut(cs).as_mut().unwrap().pending_mode = mode;
});
PIB.with(|pib| pib.pending_mode = mode)
}
pub(crate) fn ieee802154_pib_set_short_address(index: u8, address: u16) {
critical_section::with(|cs| {
PIB.borrow_ref_mut(cs).as_mut().unwrap().short_addr[index as usize] = address;
});
PIB.with(|pib| pib.short_addr[index as usize] = address)
}
pub(crate) fn ieee802154_pib_set_extended_address(
index: u8,
address: [u8; IEEE802154_FRAME_EXT_ADDR_SIZE],
) {
critical_section::with(|cs| {
PIB.borrow_ref_mut(cs).as_mut().unwrap().ext_addr[index as usize] = address;
});
PIB.with(|pib| pib.ext_addr[index as usize] = address)
}
pub(crate) fn ieee802154_pib_set_cca_theshold(cca_threshold: i8) {
critical_section::with(|cs| {
PIB.borrow_ref_mut(cs).as_mut().unwrap().cca_threshold = cca_threshold;
});
PIB.with(|pib| pib.cca_threshold = cca_threshold)
}
pub(crate) fn ieee802154_pib_set_cca_mode(mode: CcaMode) {
critical_section::with(|cs| {
PIB.borrow_ref_mut(cs).as_mut().unwrap().cca_mode = mode;
});
PIB.with(|pib| pib.cca_mode = mode)
}
pub(crate) fn ieee802154_pib_update() {
critical_section::with(|cs| {
let mut pib = PIB.borrow_ref_mut(cs);
let pib = pib.as_mut().unwrap();
PIB.with(|pib| {
set_freq(channel_to_freq(pib.channel));
set_power(ieee802154_txpower_convert(pib.txpower));
@ -210,7 +193,7 @@ pub(crate) fn ieee802154_pib_update() {
set_coordinator(pib.coordinator);
set_promiscuous(pib.promiscuous);
set_pending_mode(pib.pending_mode == PendingMode::Enhanced);
});
})
}
fn channel_to_freq(channel: u8) -> u8 {

View File

@ -1,13 +1,12 @@
use alloc::collections::VecDeque as Queue;
use core::cell::RefCell;
use critical_section::Mutex;
use esp_hal::{
clock::{ModemClockController, PhyClockGuard, init_radio_clocks},
handler,
interrupt::Priority,
peripherals::IEEE802154,
};
use esp_sync::NonReentrantMutex;
use esp_wifi_sys::include::{
ieee802154_coex_event_t,
ieee802154_coex_event_t_IEEE802154_IDLE,
@ -33,8 +32,16 @@ const RX_QUEUE_SIZE: usize =
esp_config::esp_config_int!(usize, "ESP_RADIO_CONFIG_IEEE802154_RX_QUEUE_SIZE");
static mut RX_BUFFER: [u8; FRAME_SIZE] = [0u8; FRAME_SIZE];
static RX_QUEUE: Mutex<RefCell<Queue<RawReceived>>> = Mutex::new(RefCell::new(Queue::new()));
static STATE: Mutex<RefCell<Ieee802154State>> = Mutex::new(RefCell::new(Ieee802154State::Idle));
/// Driver state shared between the public API and the ZB_MAC interrupt
/// handler, guarded as one unit so state and queue stay consistent.
struct IeeeState {
    /// Current radio operation (idle / receive / transmit / tx-ack).
    state: Ieee802154State,
    /// Frames pushed by the interrupt handler, drained by `ieee802154_poll`.
    rx_queue: Queue<RawReceived>,
}
/// Global driver state: starts idle with an empty receive queue.
static STATE: NonReentrantMutex<IeeeState> = NonReentrantMutex::new(IeeeState {
    state: Ieee802154State::Idle,
    rx_queue: Queue::new(),
});
unsafe extern "C" {
fn bt_bb_v2_init_cmplx(print_version: u8); // from libbtbb.a
@ -79,7 +86,7 @@ pub(crate) fn esp_ieee802154_enable(mut radio: IEEE802154<'_>) -> PhyClockGuard<
radio.enable_modem_clock(true);
unsafe {
crate::common_adapter::chip_specific::phy_enable();
crate::common_adapter::phy_enable();
}
esp_btbb_enable();
ieee802154_mac_init();
@ -137,10 +144,14 @@ fn ieee802154_mac_init() {
unsafe {
esp_hal::interrupt::bind_interrupt(
esp_hal::peripherals::Interrupt::ZB_MAC,
ZB_MAC.handler(),
zb_mac_handler.handler(),
);
}
esp_hal::interrupt::enable(esp_hal::peripherals::Interrupt::ZB_MAC, ZB_MAC.priority()).unwrap();
esp_hal::interrupt::enable(
esp_hal::peripherals::Interrupt::ZB_MAC,
zb_mac_handler.priority(),
)
.unwrap();
}
fn ieee802154_set_txrx_pti(txrx_scene: Ieee802154TxRxScene) {
@ -174,7 +185,7 @@ pub fn tx_init(frame: *const u8) {
}
pub fn ieee802154_transmit(frame: *const u8, cca: bool) -> i32 {
critical_section::with(|cs| {
STATE.with(|state| {
tx_init(frame);
ieee802154_set_txrx_pti(Ieee802154TxRxScene::Tx);
@ -190,7 +201,7 @@ pub fn ieee802154_transmit(frame: *const u8, cca: bool) -> i32 {
// {
// ieee802154_state = IEEE802154_STATE_TX_ENH_ACK;
// } else {
*STATE.borrow_ref_mut(cs) = Ieee802154State::Transmit;
state.state = Ieee802154State::Transmit;
// }
}
});
@ -199,25 +210,22 @@ pub fn ieee802154_transmit(frame: *const u8, cca: bool) -> i32 {
}
pub fn ieee802154_receive() -> i32 {
critical_section::with(|cs| {
if *STATE.borrow_ref(cs) == Ieee802154State::Receive {
STATE.with(|state| {
if state.state == Ieee802154State::Receive {
return;
}
rx_init();
enable_rx();
*STATE.borrow_ref_mut(cs) = Ieee802154State::Receive;
state.state = Ieee802154State::Receive;
});
0 // ESP-OK
}
pub fn ieee802154_poll() -> Option<RawReceived> {
critical_section::with(|cs| {
let mut queue = RX_QUEUE.borrow_ref_mut(cs);
queue.pop_front()
})
STATE.with(|state| state.rx_queue.pop_front())
}
fn rx_init() {
@ -241,10 +249,7 @@ fn stop_current_operation() {
}
fn set_next_rx_buffer() {
#[allow(unused_unsafe)] // stable compiler needs unsafe, nightly complains about it
unsafe {
set_rx_addr(core::ptr::addr_of_mut!(RX_BUFFER).cast());
}
set_rx_addr(core::ptr::addr_of_mut!(RX_BUFFER).cast());
}
pub fn set_promiscuous(enable: bool) {
@ -317,17 +322,16 @@ fn ieee802154_sec_update() {
}
fn next_operation() {
let previous_operation = critical_section::with(|cs| {
let state = *STATE.borrow_ref(cs);
if ieee802154_pib_get_rx_when_idle() {
let previous_operation = STATE.with(|state| {
let prev_state = state.state;
state.state = if ieee802154_pib_get_rx_when_idle() {
enable_rx();
*STATE.borrow_ref_mut(cs) = Ieee802154State::Receive;
Ieee802154State::Receive
} else {
*STATE.borrow_ref_mut(cs) = Ieee802154State::Idle;
}
Ieee802154State::Idle
};
state
prev_state
});
match previous_operation {
@ -339,7 +343,7 @@ fn next_operation() {
}
#[handler(priority = Priority::Priority1)]
fn ZB_MAC() {
fn zb_mac_handler() {
trace!("ZB_MAC interrupt");
let events = events();
@ -371,14 +375,13 @@ fn ZB_MAC() {
"Received raw {:?}",
crate::fmt::Bytes(&*core::ptr::addr_of!(RX_BUFFER))
);
critical_section::with(|cs| {
let mut queue = RX_QUEUE.borrow_ref_mut(cs);
if queue.len() <= RX_QUEUE_SIZE {
STATE.with(|state| {
if state.rx_queue.len() <= RX_QUEUE_SIZE {
let item = RawReceived {
data: RX_BUFFER,
channel: freq_to_channel(freq()),
};
queue.push_back(item);
state.rx_queue.push_back(item);
} else {
warn!("Receive queue full");
}
@ -390,7 +393,7 @@ fn ZB_MAC() {
&RX_BUFFER[1..][..RX_BUFFER[0] as usize]
};
if will_auto_send_ack(frm) {
*STATE.borrow_ref_mut(cs) = Ieee802154State::TxAck;
state.state = Ieee802154State::TxAck;
} else if should_send_enhanced_ack(frm) {
// TODO
} else {

View File

@ -116,6 +116,7 @@ pub use common_adapter::{phy_calibration_data, set_phy_calibration_data};
use esp_config::*;
use esp_hal::{self as hal};
use esp_radio_preempt_driver as preempt;
use esp_sync::RawMutex;
#[cfg(esp32)]
use hal::analog::adc::{release_adc2, try_claim_adc2};
use hal::{
@ -183,6 +184,8 @@ pub mod tasks;
pub(crate) mod memory_fence;
pub(crate) static ESP_RADIO_LOCK: RawMutex = RawMutex::new();
// this is just to verify that we use the correct defaults in `build.rs`
#[allow(clippy::assertions_on_constants)] // TODO: try assert_eq once it's usable in const context
const _: () = {

View File

@ -1,6 +1,9 @@
#[cfg(any(feature = "wifi", feature = "ble"))]
#[allow(unused_imports)]
use crate::hal::{interrupt, peripherals::Interrupt};
use crate::{
ESP_RADIO_LOCK,
hal::{interrupt, peripherals::Interrupt},
};
pub(crate) fn setup_radio_isr() {
// no-op
@ -41,13 +44,13 @@ extern "C" fn WIFI_PWR() {
}
trace!("interrupt 1 done");
};
}
}
#[cfg(feature = "ble")]
#[unsafe(no_mangle)]
extern "C" fn RWBLE() {
critical_section::with(|_| unsafe {
ESP_RADIO_LOCK.lock(|| unsafe {
let (fnc, arg) = crate::ble::btdm::ble_os_adapter_chip_specific::ISR_INTERRUPT_8;
trace!("interrupt RWBLE {:?} {:?}", fnc, arg);
if !fnc.is_null() {
@ -60,7 +63,7 @@ extern "C" fn RWBLE() {
#[cfg(feature = "ble")]
#[unsafe(no_mangle)]
extern "C" fn BT_BB() {
critical_section::with(|_| unsafe {
ESP_RADIO_LOCK.lock(|| unsafe {
let (fnc, arg) = crate::ble::btdm::ble_os_adapter_chip_specific::ISR_INTERRUPT_5;
trace!("interrupt RWBT {:?} {:?}", fnc, arg);

View File

@ -2,7 +2,7 @@
use alloc::boxed::Box;
use esp_hal::sync::Locked;
use esp_sync::NonReentrantMutex;
use super::WifiEvent;
use crate::wifi::include::{
@ -15,7 +15,7 @@ pub(crate) mod sealed {
pub trait Event {
/// Get the static reference to the handler for this event.
fn handler() -> &'static Locked<Option<Box<Handler<Self>>>>;
fn handler() -> &'static NonReentrantMutex<Option<Box<Handler<Self>>>>;
/// # Safety
/// `ptr` must be a valid for casting to this event's inner event data.
unsafe fn from_raw_event_data(ptr: *mut crate::binary::c_types::c_void) -> Self;
@ -91,8 +91,9 @@ macro_rules! impl_wifi_event {
unsafe fn from_raw_event_data(_: *mut crate::binary::c_types::c_void) -> Self {
Self
}
fn handler() -> &'static Locked<Option<Box<Handler<Self>>>> {
static HANDLE: Locked<Option<Box<Handler<$newtype>>>> = Locked::new(None);
fn handler() -> &'static NonReentrantMutex<Option<Box<Handler<Self>>>> {
static HANDLE: NonReentrantMutex<Option<Box<Handler<$newtype>>>> =
NonReentrantMutex::new(None);
&HANDLE
}
}
@ -107,8 +108,9 @@ macro_rules! impl_wifi_event {
unsafe fn from_raw_event_data(ptr: *mut crate::binary::c_types::c_void) -> Self {
Self(unsafe { *ptr.cast() })
}
fn handler() -> &'static Locked<Option<Box<Handler<Self>>>> {
static HANDLE: Locked<Option<Box<Handler<$newtype>>>> = Locked::new(None);
fn handler() -> &'static NonReentrantMutex<Option<Box<Handler<Self>>>> {
static HANDLE: NonReentrantMutex<Option<Box<Handler<$newtype>>>> =
NonReentrantMutex::new(None);
&HANDLE
}
}
@ -752,7 +754,7 @@ pub(crate) unsafe fn handle_raw<Event: EventExt>(
}
/// Handle event regardless of its type.
///
///
/// # Safety
/// Arguments should be self-consistent.
#[rustfmt::skip]

View File

@ -7,7 +7,7 @@ use esp_wifi_sys::include::{
wpa_crypto_funcs_t,
};
use super::os_adapter::*;
use super::os_adapter::{self, *};
use crate::common_adapter::*;
#[cfg(all(coex, any(esp32, esp32c2, esp32c3, esp32c6, esp32s3)))]
@ -120,8 +120,8 @@ static __ESP_RADIO_G_WIFI_OSI_FUNCS: wifi_osi_funcs_t = wifi_osi_funcs_t {
_dport_access_stall_other_cpu_end_wrap: Some(dport_access_stall_other_cpu_end_wrap),
_wifi_apb80m_request: Some(wifi_apb80m_request),
_wifi_apb80m_release: Some(wifi_apb80m_release),
_phy_disable: Some(phy_disable),
_phy_enable: Some(phy_enable),
_phy_disable: Some(os_adapter::phy_disable),
_phy_enable: Some(os_adapter::phy_enable),
_phy_update_country_info: Some(phy_update_country_info),
_read_mac: Some(read_mac),
_timer_arm: Some(ets_timer_arm),

View File

@ -18,7 +18,8 @@ use core::{
};
use enumset::{EnumSet, EnumSetType};
use esp_hal::{asynch::AtomicWaker, sync::Locked};
use esp_hal::asynch::AtomicWaker;
use esp_sync::NonReentrantMutex;
#[cfg(all(any(feature = "sniffer", feature = "esp-now"), feature = "unstable"))]
use esp_wifi_sys::include::wifi_pkt_rx_ctrl_t;
use esp_wifi_sys::include::{
@ -1181,9 +1182,11 @@ impl CsiConfig {
const RX_QUEUE_SIZE: usize = crate::CONFIG.rx_queue_size;
const TX_QUEUE_SIZE: usize = crate::CONFIG.tx_queue_size;
pub(crate) static DATA_QUEUE_RX_AP: Locked<VecDeque<PacketBuffer>> = Locked::new(VecDeque::new());
pub(crate) static DATA_QUEUE_RX_AP: NonReentrantMutex<VecDeque<PacketBuffer>> =
NonReentrantMutex::new(VecDeque::new());
pub(crate) static DATA_QUEUE_RX_STA: Locked<VecDeque<PacketBuffer>> = Locked::new(VecDeque::new());
pub(crate) static DATA_QUEUE_RX_STA: NonReentrantMutex<VecDeque<PacketBuffer>> =
NonReentrantMutex::new(VecDeque::new());
/// Common errors.
#[derive(Debug, Clone, Copy)]
@ -1811,7 +1814,7 @@ impl WifiDeviceMode {
}
}
fn data_queue_rx(&self) -> &'static Locked<VecDeque<PacketBuffer>> {
fn data_queue_rx(&self) -> &'static NonReentrantMutex<VecDeque<PacketBuffer>> {
match self {
WifiDeviceMode::Sta => &DATA_QUEUE_RX_STA,
WifiDeviceMode::Ap => &DATA_QUEUE_RX_AP,
@ -2158,7 +2161,7 @@ impl PromiscuousPkt<'_> {
}
#[cfg(all(feature = "sniffer", feature = "unstable"))]
static SNIFFER_CB: Locked<Option<fn(PromiscuousPkt<'_>)>> = Locked::new(None);
static SNIFFER_CB: NonReentrantMutex<Option<fn(PromiscuousPkt<'_>)>> = NonReentrantMutex::new(None);
#[cfg(all(feature = "sniffer", feature = "unstable"))]
unsafe extern "C" fn promiscuous_rx_cb(buf: *mut core::ffi::c_void, frame_type: u32) {
@ -3154,7 +3157,7 @@ impl WifiController<'_> {
}
fn clear_events(events: impl Into<EnumSet<WifiEvent>>) {
WIFI_EVENTS.with(|evts| evts.get_mut().remove_all(events.into()));
WIFI_EVENTS.with(|evts| evts.remove_all(events.into()));
}
/// Wait for one [`WifiEvent`].
@ -3223,7 +3226,7 @@ impl core::future::Future for WifiEventFuture {
cx: &mut core::task::Context<'_>,
) -> Poll<Self::Output> {
self.event.waker().register(cx.waker());
if WIFI_EVENTS.with(|events| events.get_mut().remove(self.event)) {
if WIFI_EVENTS.with(|events| events.remove(self.event)) {
Poll::Ready(())
} else {
Poll::Pending
@ -3251,7 +3254,6 @@ impl core::future::Future for MultiWifiEventFuture {
cx: &mut core::task::Context<'_>,
) -> Poll<Self::Output> {
let output = WIFI_EVENTS.with(|events| {
let events = events.get_mut();
let active = events.intersection(self.event);
events.remove_all(active);
active

View File

@ -46,10 +46,7 @@ pub unsafe extern "C" fn set_isr(
trace!("set_isr - interrupt {} function {:?} arg {:?}", n, f, arg);
match n {
0 => unsafe {
crate::wifi::ISR_INTERRUPT_1 = (f, arg);
},
1 => unsafe {
0 | 1 => unsafe {
crate::wifi::ISR_INTERRUPT_1 = (f, arg);
},
_ => panic!("set_isr - unsupported interrupt number {}", n),

View File

@ -7,9 +7,10 @@
#[cfg_attr(esp32s3, path = "esp32s3.rs")]
pub(crate) mod os_adapter_chip_specific;
use core::{cell::RefCell, ptr::addr_of_mut};
use core::ptr::addr_of_mut;
use enumset::EnumSet;
use esp_sync::{NonReentrantMutex, RawMutex};
use super::WifiEvent;
use crate::{
@ -29,11 +30,7 @@ use crate::{
},
malloc::calloc_internal,
},
hal::{
clock::ModemClockController,
peripherals::WIFI,
sync::{Locked, RawMutex},
},
hal::{clock::ModemClockController, peripherals::WIFI},
memory_fence::memory_fence,
preempt::yield_task,
};
@ -44,8 +41,8 @@ static mut QUEUE_HANDLE: *mut ConcurrentQueue = core::ptr::null_mut();
// useful for waiting for events - clear and wait for the event bit to be set
// again
pub(crate) static WIFI_EVENTS: Locked<RefCell<EnumSet<WifiEvent>>> =
Locked::new(RefCell::new(enumset::enum_set!()));
pub(crate) static WIFI_EVENTS: NonReentrantMutex<EnumSet<WifiEvent>> =
NonReentrantMutex::new(enumset::enum_set!());
/// **************************************************************************
/// Name: wifi_env_is_chip
@ -223,7 +220,7 @@ pub unsafe extern "C" fn wifi_int_disable(
trace!("wifi_int_disable");
// TODO: can we use wifi_int_mux?
let token = unsafe { WIFI_LOCK.acquire() };
unsafe { core::mem::transmute::<esp_hal::sync::RestoreState, u32>(token) }
unsafe { core::mem::transmute::<esp_sync::RestoreState, u32>(token) }
}
/// **************************************************************************
@ -246,7 +243,7 @@ pub unsafe extern "C" fn wifi_int_restore(
tmp: u32,
) {
trace!("wifi_int_restore");
let token = unsafe { core::mem::transmute::<u32, esp_hal::sync::RestoreState>(tmp) };
let token = unsafe { core::mem::transmute::<u32, esp_sync::RestoreState>(tmp) };
unsafe { WIFI_LOCK.release(token) }
}
@ -871,7 +868,7 @@ pub unsafe extern "C" fn event_post(
let event = unwrap!(WifiEvent::from_i32(event_id));
trace!("EVENT: {:?}", event);
WIFI_EVENTS.with(|events| events.borrow_mut().insert(event));
WIFI_EVENTS.with(|events| events.insert(event));
let handled =
unsafe { super::event::dispatch_event_handler(event, event_data, event_data_size) };
@ -988,9 +985,7 @@ pub unsafe extern "C" fn wifi_apb80m_release() {
pub unsafe extern "C" fn phy_disable() {
trace!("phy_disable");
unsafe {
crate::common_adapter::chip_specific::phy_disable();
}
unsafe { crate::common_adapter::phy_disable() }
}
/// **************************************************************************
@ -1010,9 +1005,7 @@ pub unsafe extern "C" fn phy_enable() {
// quite some code needed here
trace!("phy_enable");
unsafe {
crate::common_adapter::chip_specific::phy_enable();
}
unsafe { crate::common_adapter::phy_enable() }
}
/// **************************************************************************

View File

@ -22,10 +22,9 @@ embedded-storage = "0.3.1"
procmacros = { version = "0.19.0", package = "esp-hal-procmacros", path = "../esp-hal-procmacros" }
# Optional dependencies
critical-section = { version = "1.2.0", optional = true }
esp-sync = { version = "0.0.0", path = "../esp-sync", optional = true }
esp-rom-sys = { version = "0.1.1", path = "../esp-rom-sys", optional = true }
# Unstable dependencies that are not (strictly) part of the public API
document-features = "0.2.11"
@ -33,7 +32,7 @@ document-features = "0.2.11"
default = ["critical-section"]
## Place the flash operations in a critical section
critical-section = ["dep:critical-section"]
critical-section = []
## Bytewise read emulation
bytewise-read = []
@ -42,18 +41,18 @@ bytewise-read = []
#! One of the following features must be enabled to select the target chip:
##
esp32c2 = ["esp-rom-sys/esp32c2"]
esp32c2 = ["esp-rom-sys/esp32c2", "esp-sync/esp32c2"]
##
esp32c3 = ["esp-rom-sys/esp32c3"]
esp32c3 = ["esp-rom-sys/esp32c3", "esp-sync/esp32c3"]
##
esp32c6 = ["esp-rom-sys/esp32c6"]
esp32c6 = ["esp-rom-sys/esp32c6", "esp-sync/esp32c6"]
##
esp32h2 = ["esp-rom-sys/esp32h2"]
esp32h2 = ["esp-rom-sys/esp32h2", "esp-sync/esp32h2"]
##
esp32 = ["esp-rom-sys/esp32"]
esp32 = ["esp-rom-sys/esp32", "esp-sync/esp32"]
##
esp32s2 = ["esp-rom-sys/esp32s2"]
esp32s2 = ["esp-rom-sys/esp32s2", "esp-sync/esp32s2"]
##
esp32s3 = ["esp-rom-sys/esp32s3"]
esp32s3 = ["esp-rom-sys/esp32s3", "esp-sync/esp32s3"]
## Used for testing on a host.
emulation = []

View File

@ -19,7 +19,11 @@ mod storage;
#[inline(always)]
fn maybe_with_critical_section<R>(f: impl FnOnce() -> R) -> R {
#[cfg(feature = "critical-section")]
return critical_section::with(|_| f());
{
static LOCK: esp_sync::RawMutex = esp_sync::RawMutex::new();
return LOCK.lock(f);
}
#[cfg(not(feature = "critical-section"))]
f()

12
esp-sync/CHANGELOG.md Normal file
View File

@ -0,0 +1,12 @@
# Changelog
All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## [Unreleased]
### Added
- Initial release (#4023)

55
esp-sync/Cargo.toml Normal file
View File

@ -0,0 +1,55 @@
[package]
name = "esp-sync"
version = "0.0.0"
edition = "2024"
rust-version = "1.88.0"
description = "Synchronization primitives for Espressif devices"
documentation = "https://docs.espressif.com/projects/rust/esp-sync/latest/"
categories = ["no-std", "embedded", "concurrency"]
repository = "https://github.com/esp-rs/esp-hal"
license = "MIT OR Apache-2.0"
exclude = [ "MIGRATING-*", "CHANGELOG.md" ]
[dependencies]
cfg-if = "1"
document-features = "0.2"
esp-metadata-generated = { version = "0.1.0", path = "../esp-metadata-generated" }
embassy-sync-06 = { package = "embassy-sync", version = "0.6" }
embassy-sync-07 = { package = "embassy-sync", version = "0.7" }
# Logging interfaces, they are mutually exclusive so they need to be behind separate features.
defmt = { version = "1.0.1", optional = true }
log-04 = { package = "log", version = "0.4", optional = true }
[target.'cfg(target_arch = "riscv32")'.dependencies]
riscv = { version = "0.14.0" }
[target.'cfg(target_arch = "xtensa")'.dependencies]
xtensa-lx = { version = "0.12.0", path = "../xtensa-lx" }
[build-dependencies]
esp-metadata-generated = { version = "0.1.0", path = "../esp-metadata-generated", features = ["build-script"] }
[features]
#! ### Chip Support Feature Flags
## Target the ESP32.
esp32 = ["esp-metadata-generated/esp32"]
## Target the ESP32-C2.
esp32c2 = ["esp-metadata-generated/esp32c2"]
## Target the ESP32-C3.
esp32c3 = ["esp-metadata-generated/esp32c3"]
## Target the ESP32-C6.
esp32c6 = ["esp-metadata-generated/esp32c6"]
## Target the ESP32-H2.
esp32h2 = ["esp-metadata-generated/esp32h2"]
## Target the ESP32-S2.
esp32s2 = ["esp-metadata-generated/esp32s2"]
## Target the ESP32-S3.
esp32s3 = ["esp-metadata-generated/esp32s3"]
#! ### Logging Feature Flags
## Enable logging output using version 0.4 of the `log` crate.
log-04 = ["dep:log-04"]
## Enable logging output using `defmt` and implement `defmt::Format` on certain types.
defmt = ["dep:defmt"]

29
esp-sync/README.md Normal file
View File

@ -0,0 +1,29 @@
# esp-sync
[![Crates.io](https://img.shields.io/crates/v/esp-sync?labelColor=1C2C2E&color=C96329&logo=Rust&style=flat-square)](https://crates.io/crates/esp-sync)
[![docs.rs](https://img.shields.io/docsrs/esp-sync?labelColor=1C2C2E&color=C96329&logo=rust&style=flat-square)](https://docs.espressif.com/projects/rust/esp-sync/latest/)
![MSRV](https://img.shields.io/badge/MSRV-1.88.0-blue?labelColor=1C2C2E&style=flat-square)
![Crates.io](https://img.shields.io/crates/l/esp-sync?labelColor=1C2C2E&style=flat-square)
[![Matrix](https://img.shields.io/matrix/esp-rs:matrix.org?label=join%20matrix&labelColor=1C2C2E&color=BEC5C9&logo=matrix&style=flat-square)](https://matrix.to/#/#esp-rs:matrix.org)
This crate provides an optimized raw mutex (a lock type that doesn't wrap any data) for ESP32 devices.
## Minimum Supported Rust Version (MSRV)
This crate is guaranteed to compile when using the latest stable Rust version at the time of the crate's release. It _might_ compile with older versions, but that may change in any new release, including patches.
## License
Licensed under either of
- Apache License, Version 2.0 ([LICENSE-APACHE](../LICENSE-APACHE) or
http://www.apache.org/licenses/LICENSE-2.0)
- MIT license ([LICENSE-MIT](../LICENSE-MIT) or http://opensource.org/licenses/MIT)
at your option.
### Contribution
Unless you explicitly state otherwise, any contribution intentionally submitted for inclusion in the
work by you, as defined in the Apache-2.0 license, shall be dual licensed as above, without any
additional terms or conditions.

9
esp-sync/build.rs Normal file
View File

@ -0,0 +1,9 @@
use std::error::Error;
/// Build script: resolves the chip selected via Cargo features and emits the
/// corresponding `cfg` symbols so the crate source can gate code per device.
fn main() -> Result<(), Box<dyn Error>> {
    // Define all necessary configuration symbols for the configured device:
    // `from_cargo_feature` errors unless exactly one chip feature is enabled.
    let chip = esp_metadata_generated::Chip::from_cargo_feature()?;
    chip.define_cfgs();

    Ok(())
}

281
esp-sync/src/fmt.rs Normal file
View File

@ -0,0 +1,281 @@
#![macro_use]
#![allow(unused_macros)]
// Forwards to `defmt::assert!` when the `defmt` feature is enabled,
// otherwise to `core::assert!`.
#[collapse_debuginfo(yes)]
macro_rules! assert {
    ($($x:tt)*) => {
        {
            cfg_if::cfg_if! {
                if #[cfg(feature = "defmt")] {
                    ::defmt::assert!($($x)*);
                } else {
                    ::core::assert!($($x)*);
                }
            }
        }
    };
}
// Forwards to `defmt::assert_eq!` when the `defmt` feature is enabled,
// otherwise to `core::assert_eq!`.
#[collapse_debuginfo(yes)]
macro_rules! assert_eq {
    ($($x:tt)*) => {
        {
            cfg_if::cfg_if! {
                if #[cfg(feature = "defmt")] {
                    ::defmt::assert_eq!($($x)*);
                } else {
                    ::core::assert_eq!($($x)*);
                }
            }
        }
    };
}
// Forwards to `defmt::assert_ne!` when the `defmt` feature is enabled,
// otherwise to `core::assert_ne!`.
#[collapse_debuginfo(yes)]
macro_rules! assert_ne {
    ($($x:tt)*) => {
        {
            cfg_if::cfg_if! {
                if #[cfg(feature = "defmt")] {
                    ::defmt::assert_ne!($($x)*);
                } else {
                    ::core::assert_ne!($($x)*);
                }
            }
        }
    };
}
// Forwards to `defmt::debug_assert!` when the `defmt` feature is enabled,
// otherwise to `core::debug_assert!`.
#[collapse_debuginfo(yes)]
macro_rules! debug_assert {
    ($($x:tt)*) => {
        {
            cfg_if::cfg_if! {
                if #[cfg(feature = "defmt")] {
                    ::defmt::debug_assert!($($x)*);
                } else {
                    ::core::debug_assert!($($x)*);
                }
            }
        }
    };
}
// Forwards to `defmt::debug_assert_eq!` when the `defmt` feature is enabled,
// otherwise to `core::debug_assert_eq!`.
#[collapse_debuginfo(yes)]
macro_rules! debug_assert_eq {
    ($($x:tt)*) => {
        {
            cfg_if::cfg_if! {
                if #[cfg(feature = "defmt")] {
                    ::defmt::debug_assert_eq!($($x)*);
                } else {
                    ::core::debug_assert_eq!($($x)*);
                }
            }
        }
    };
}
// Forwards to `defmt::debug_assert_ne!` when the `defmt` feature is enabled,
// otherwise to `core::debug_assert_ne!`.
#[collapse_debuginfo(yes)]
macro_rules! debug_assert_ne {
    ($($x:tt)*) => {
        {
            cfg_if::cfg_if! {
                if #[cfg(feature = "defmt")] {
                    ::defmt::debug_assert_ne!($($x)*);
                } else {
                    ::core::debug_assert_ne!($($x)*);
                }
            }
        }
    };
}
// Forwards to `defmt::todo!` when the `defmt` feature is enabled,
// otherwise to `core::todo!`.
#[collapse_debuginfo(yes)]
macro_rules! todo {
    ($($x:tt)*) => {
        {
            cfg_if::cfg_if! {
                if #[cfg(feature = "defmt")] {
                    ::defmt::todo!($($x)*);
                } else {
                    ::core::todo!($($x)*);
                }
            }
        }
    };
}
// Forwards to `defmt::unreachable!` when the `defmt` feature is enabled,
// otherwise to `core::unreachable!`.
#[collapse_debuginfo(yes)]
macro_rules! unreachable {
    ($($x:tt)*) => {
        {
            cfg_if::cfg_if! {
                if #[cfg(feature = "defmt")] {
                    ::defmt::unreachable!($($x)*);
                } else {
                    ::core::unreachable!($($x)*);
                }
            }
        }
    };
}
// Forwards to `defmt::panic!` when the `defmt` feature is enabled,
// otherwise to `core::panic!`.
#[collapse_debuginfo(yes)]
macro_rules! panic {
    ($($x:tt)*) => {
        {
            cfg_if::cfg_if! {
                if #[cfg(feature = "defmt")] {
                    ::defmt::panic!($($x)*);
                } else {
                    ::core::panic!($($x)*);
                }
            }
        }
    };
}
// Trace-level logging: `defmt` takes precedence over `log-04` (branch order
// matters when both features are enabled); with neither feature, arguments
// are referenced so they don't trigger unused-variable warnings.
#[collapse_debuginfo(yes)]
macro_rules! trace {
    ($s:literal $(, $x:expr)* $(,)?) => {
        {
            cfg_if::cfg_if! {
                if #[cfg(feature = "defmt")] {
                    ::defmt::trace!($s $(, $x)*);
                } else if #[cfg(feature = "log-04")] {
                    ::log_04::trace!($s $(, $x)*);
                } else {
                    let _ = ($( & $x ),*);
                }
            }
        }
    };
}
// Debug-level logging: `defmt` takes precedence over `log-04`; with neither
// feature, arguments are referenced to avoid unused warnings.
#[collapse_debuginfo(yes)]
macro_rules! debug {
    ($s:literal $(, $x:expr)* $(,)?) => {
        {
            cfg_if::cfg_if! {
                if #[cfg(feature = "defmt")] {
                    ::defmt::debug!($s $(, $x)*);
                } else if #[cfg(feature = "log-04")] {
                    ::log_04::debug!($s $(, $x)*);
                } else {
                    let _ = ($( & $x ),*);
                }
            }
        }
    };
}
// Info-level logging: `defmt` takes precedence over `log-04`; with neither
// feature, arguments are referenced to avoid unused warnings.
#[collapse_debuginfo(yes)]
macro_rules! info {
    ($s:literal $(, $x:expr)* $(,)?) => {
        {
            cfg_if::cfg_if! {
                if #[cfg(feature = "defmt")] {
                    ::defmt::info!($s $(, $x)*);
                } else if #[cfg(feature = "log-04")] {
                    ::log_04::info!($s $(, $x)*);
                } else {
                    let _ = ($( & $x ),*);
                }
            }
        }
    };
}
// Warn-level logging: `defmt` takes precedence over `log-04`; with neither
// feature, arguments are referenced to avoid unused warnings.
#[collapse_debuginfo(yes)]
macro_rules! warn {
    ($s:literal $(, $x:expr)* $(,)?) => {
        {
            cfg_if::cfg_if! {
                if #[cfg(feature = "defmt")] {
                    ::defmt::warn!($s $(, $x)*);
                } else if #[cfg(feature = "log-04")] {
                    ::log_04::warn!($s $(, $x)*);
                } else {
                    let _ = ($( & $x ),*);
                }
            }
        }
    };
}
// Error-level logging: `defmt` takes precedence over `log-04`; with neither
// feature, arguments are referenced to avoid unused warnings.
#[collapse_debuginfo(yes)]
macro_rules! error {
    ($s:literal $(, $x:expr)* $(,)?) => {
        {
            cfg_if::cfg_if! {
                if #[cfg(feature = "defmt")] {
                    ::defmt::error!($s $(, $x)*);
                } else if #[cfg(feature = "log-04")] {
                    ::log_04::error!($s $(, $x)*);
                } else {
                    let _ = ($( & $x ),*);
                }
            }
        }
    };
}
/// `unwrap!` that defers to `defmt::unwrap!` when the `defmt` feature is
/// enabled.
#[cfg(feature = "defmt")]
#[collapse_debuginfo(yes)]
macro_rules! unwrap {
    ($($x:tt)*) => {
        ::defmt::unwrap!($($x)*)
    };
}

/// Fallback `unwrap!` used without `defmt`. The argument is converted through
/// `Try::into_result`, so both `Option` and `Result` values are accepted;
/// on failure it panics with the stringified expression (and an optional
/// formatted context message).
#[cfg(not(feature = "defmt"))]
#[collapse_debuginfo(yes)]
macro_rules! unwrap {
    ($arg:expr) => {
        match $crate::fmt::Try::into_result($arg) {
            ::core::result::Result::Ok(t) => t,
            ::core::result::Result::Err(e) => {
                ::core::panic!("unwrap of `{}` failed: {:?}", ::core::stringify!($arg), e);
            }
        }
    };
    // Variant carrying an extra user-provided message, formatted lazily.
    ($arg:expr, $($msg:expr),+ $(,)? ) => {
        match $crate::fmt::Try::into_result($arg) {
            ::core::result::Result::Ok(t) => t,
            ::core::result::Result::Err(e) => {
                ::core::panic!("unwrap of `{}` failed: {}: {:?}", ::core::stringify!($arg), ::core::format_args!($($msg,)*), e);
            }
        }
    }
}
/// Error produced when a `None` value is unwrapped through [`Try`].
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
pub struct NoneError;

/// Unifies `Option` and `Result` behind a single conversion so the `unwrap!`
/// macro can accept either.
pub trait Try {
    /// The success value.
    type Ok;
    /// The failure value.
    type Error;
    /// Converts `self` into the equivalent `Result`.
    #[allow(unused)]
    fn into_result(self) -> Result<Self::Ok, Self::Error>;
}

impl<T> Try for Option<T> {
    type Ok = T;
    type Error = NoneError;

    #[inline]
    fn into_result(self) -> Result<T, NoneError> {
        match self {
            Some(value) => Ok(value),
            None => Err(NoneError),
        }
    }
}

impl<T, E> Try for Result<T, E> {
    type Ok = T;
    type Error = E;

    #[inline]
    fn into_result(self) -> Self {
        self
    }
}

404
esp-sync/src/lib.rs Normal file
View File

@ -0,0 +1,404 @@
//! Synchronization primitives for ESP32 devices
//!
//! ## Feature Flags
#![doc = document_features::document_features!(feature_label = r#"<span class="stab portability"><code>{feature}</code></span>"#)]
#![doc(html_logo_url = "https://avatars.githubusercontent.com/u/46717278")]
#![cfg_attr(xtensa, feature(asm_experimental_arch))]
#![deny(missing_docs, rust_2018_idioms, rustdoc::all)]
// Don't trip up on broken/private links when running semver-checks
#![cfg_attr(
semver_checks,
allow(rustdoc::private_intra_doc_links, rustdoc::broken_intra_doc_links)
)]
#![no_std]
// MUST be the first module
mod fmt;
use core::{cell::UnsafeCell, marker::PhantomData};
pub mod raw;
use raw::{RawLock, SingleCoreInterruptLock};
/// Opaque token that can be used to release a lock.
// How the wrapped value is interpreted is up to the lock type that produced
// it, with one exception: bit #31 is reserved for the reentry flag.
//
// Xtensa: PS has 15 useful bits. Bits 12..16 and 19..32 are unused, so we can
// use bit #31 as our reentry flag.
// We can assume the reserved bit is 0 otherwise rsil - wsr pairings would be
// undefined behavior: Quoting the ISA summary, table 64:
// Writing a non-zero value to these fields results in undefined processor
// behavior.
//
// Risc-V: we either get the restore state from bit 3 of mstatus, or
// we create the restore state from the current Priority, which is at most 31.
#[derive(Clone, Copy, Debug)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub struct RestoreState(u32, PhantomData<*const ()>);

impl RestoreState {
    const REENTRY_FLAG: u32 = 1 << 31;

    /// Creates a new RestoreState from a raw inner state.
    ///
    /// # Safety
    ///
    /// The `inner` value must be appropriate for the [RawMutex] implementation that creates it.
    pub const unsafe fn new(inner: u32) -> Self {
        Self(inner, PhantomData)
    }

    /// Flags this token as the result of a reentrant acquisition.
    fn mark_reentry(&mut self) {
        self.0 = self.0 | Self::REENTRY_FLAG;
    }

    /// Returns whether this token was produced by a reentrant acquisition.
    fn is_reentry(self) -> bool {
        (self.0 & Self::REENTRY_FLAG) == Self::REENTRY_FLAG
    }

    /// Returns the raw value used to create this RestoreState.
    pub fn inner(self) -> u32 {
        let Self(raw, _) = self;
        raw
    }
}
#[cfg(single_core)]
mod single_core {
    use core::cell::Cell;

    /// Tracks whether the lock is currently held.
    ///
    /// On a single core there is no other core to race with, so a plain
    /// `Cell<bool>`, mutated while the raw lock is held, is sufficient.
    #[repr(transparent)]
    pub(super) struct LockedState {
        locked: Cell<bool>,
    }

    impl LockedState {
        /// Creates a new, unlocked state.
        pub const fn new() -> Self {
            Self {
                locked: Cell::new(false),
            }
        }

        /// Enters the raw lock and records that this core holds it, marking
        /// the returned token as reentrant if it was already held.
        pub fn lock(&self, lock: &impl crate::RawLock) -> crate::RestoreState {
            // SAFETY: the token is released via `unlock`/`RawLock::exit` in
            // reverse acquisition order by the callers of this module.
            let mut token = unsafe { lock.enter() };
            if self.locked.replace(true) {
                // Already locked on this core: flag the token so that the
                // matching release leaves the lock state untouched.
                token.mark_reentry();
            }
            token
        }

        /// # Safety:
        ///
        /// This function must only be called if the lock was acquired by the
        /// current thread.
        pub unsafe fn unlock(&self) {
            self.locked.set(false)
        }
    }
}
#[cfg(multi_core)]
mod multi_core {
    use core::sync::atomic::{AtomicUsize, Ordering};

    // Sentinel stored in `LockedState::owner` while the lock is free.
    // Safety: Ensure that when adding new chips `raw_core` doesn't return this
    // value.
    const UNUSED_THREAD_ID_VALUE: usize = 0x100;

    /// Returns an identifier for the core executing this code.
    fn thread_id() -> usize {
        // This method must never return UNUSED_THREAD_ID_VALUE
        cfg_if::cfg_if! {
            if #[cfg(all(multi_core, riscv))] {
                // The hart ID identifies the executing RISC-V core.
                riscv::register::mhartid::read()
            } else if #[cfg(all(multi_core, xtensa))] {
                // NOTE(review): masks a single bit of the processor ID, so this
                // yields one of two values (0 or 0x2000) — presumably the
                // core-identifying PRID bit; confirm against xtensa-lx docs.
                (xtensa_lx::get_processor_id() & 0x2000) as usize
            } else {
                0
            }
        }
    }

    /// Lock state for multi-core chips: stores the ID of the owning core.
    #[repr(transparent)]
    pub(super) struct LockedState {
        owner: AtomicUsize,
    }

    impl LockedState {
        /// Creates a new, unowned state.
        pub const fn new() -> Self {
            Self {
                owner: AtomicUsize::new(UNUSED_THREAD_ID_VALUE),
            }
        }

        /// Returns whether the lock is currently owned by `thread`.
        fn is_owned_by(&self, thread: usize) -> bool {
            self.owner.load(Ordering::Relaxed) == thread
        }

        /// Spins until the lock is acquired (or detected as a reentry) and
        /// returns the restore token produced by the raw lock.
        pub fn lock(&self, lock: &impl crate::RawLock) -> crate::RestoreState {
            // We acquire the lock inside an interrupt-free context to prevent a subtle
            // race condition:
            // In case an interrupt handler tries to lock the same resource, it could win if
            // the current thread is holding the lock but isn't yet in interrupt-free context.
            // If we maintain non-reentrant semantics, this situation would panic.
            // If we allow reentrancy, the interrupt handler would technically be a different
            // context with the same `current_thread_id`, so it would be allowed to lock the
            // resource in a theoretically incorrect way.
            let try_lock = |current_thread_id| {
                let mut tkn = unsafe { lock.enter() };

                // Attempt to claim the free lock by swapping in our core ID.
                let try_lock_result = self
                    .owner
                    .compare_exchange(
                        UNUSED_THREAD_ID_VALUE,
                        current_thread_id,
                        Ordering::Acquire,
                        Ordering::Relaxed,
                    )
                    .map(|_| ());

                match try_lock_result {
                    // Lock acquired.
                    Ok(()) => Some(tkn),
                    // This core already owns the lock: mark the token so the
                    // matching release leaves the lock untouched.
                    Err(owner) if owner == current_thread_id => {
                        tkn.mark_reentry();
                        Some(tkn)
                    }
                    // Another core owns the lock: undo `enter` and retry.
                    Err(_) => {
                        unsafe { lock.exit(tkn) };
                        None
                    }
                }
            };

            let current_thread_id = thread_id();
            loop {
                if let Some(token) = try_lock(current_thread_id) {
                    return token;
                }
            }
        }

        /// # Safety:
        ///
        /// This function must only be called if the lock was acquired by the
        /// current thread.
        pub unsafe fn unlock(&self) {
            debug_assert!(
                self.is_owned_by(thread_id()),
                "tried to unlock a mutex locked on a different thread"
            );
            self.owner.store(UNUSED_THREAD_ID_VALUE, Ordering::Release);
        }
    }
}
#[cfg(multi_core)]
use multi_core::LockedState;
#[cfg(single_core)]
use single_core::LockedState;
/// A generic lock that wraps a [`RawLock`] implementation and tracks
/// whether the caller has locked recursively.
pub struct GenericRawMutex<L: RawLock> {
    // The raw lock (e.g. interrupt disabling) guarding the state below.
    lock: L,
    // Ownership bookkeeping; implementation differs for single/multi core.
    inner: LockedState,
}

// Safety: LockedState ensures thread-safety
unsafe impl<L: RawLock> Sync for GenericRawMutex<L> {}

impl<L: RawLock> GenericRawMutex<L> {
    /// Create a new lock.
    pub const fn new(lock: L) -> Self {
        Self {
            lock,
            inner: LockedState::new(),
        }
    }

    /// Acquires the lock.
    ///
    /// # Safety
    ///
    /// - Each release call must be paired with an acquire call.
    /// - The returned token must be passed to the corresponding `release` call.
    /// - The caller must ensure to release the locks in the reverse order they were acquired.
    unsafe fn acquire(&self) -> RestoreState {
        self.inner.lock(&self.lock)
    }

    /// Releases the lock.
    ///
    /// # Safety
    ///
    /// - This function must only be called if the lock was acquired by the current thread.
    /// - The caller must ensure to release the locks in the reverse order they were acquired.
    /// - Each release call must be paired with an acquire call.
    unsafe fn release(&self, token: RestoreState) {
        unsafe {
            // A reentry token means an outer guard still owns the lock, so
            // neither the ownership state nor the raw lock is touched here.
            if !token.is_reentry() {
                // Clear the ownership state first, while the raw lock is
                // still held, then restore the raw lock state.
                self.inner.unlock();

                self.lock.exit(token)
            }
        }
    }

    /// Runs the callback with this lock locked.
    ///
    /// Note that this function is not reentrant, calling it reentrantly will
    /// panic.
    pub fn lock_non_reentrant<R>(&self, f: impl FnOnce() -> R) -> R {
        // The guard releases the lock when it is dropped at the end of scope.
        let _token = LockGuard::new_non_reentrant(self);
        f()
    }

    /// Runs the callback with this lock locked.
    pub fn lock<R>(&self, f: impl FnOnce() -> R) -> R {
        let _token = LockGuard::new_reentrant(self);
        f()
    }
}
/// A mutual exclusion primitive.
///
/// This lock disables interrupts on the current core while locked.
#[cfg_attr(
    multi_core,
    doc = r#"It needs a bit of memory, but it does not take a global critical
section, making it preferrable for use in multi-core systems."#
)]
pub struct RawMutex {
    // All functionality is delegated to the generic implementation,
    // specialized to the interrupt-disabling raw lock.
    inner: GenericRawMutex<SingleCoreInterruptLock>,
}

impl Default for RawMutex {
    fn default() -> Self {
        Self::new()
    }
}

impl RawMutex {
    /// Create a new lock.
    pub const fn new() -> Self {
        Self {
            inner: GenericRawMutex::new(SingleCoreInterruptLock),
        }
    }

    /// Acquires the lock.
    ///
    /// # Safety
    ///
    /// - Each release call must be paired with an acquire call.
    /// - The returned token must be passed to the corresponding `release` call.
    /// - The caller must ensure to release the locks in the reverse order they were acquired.
    pub unsafe fn acquire(&self) -> RestoreState {
        unsafe { self.inner.acquire() }
    }

    /// Releases the lock.
    ///
    /// # Safety
    ///
    /// - This function must only be called if the lock was acquired by the current thread.
    /// - The caller must ensure to release the locks in the reverse order they were acquired.
    /// - Each release call must be paired with an acquire call.
    pub unsafe fn release(&self, token: RestoreState) {
        unsafe {
            self.inner.release(token);
        }
    }

    /// Runs the callback with this lock locked.
    ///
    /// Note that this function is not reentrant, calling it reentrantly will
    /// panic.
    pub fn lock_non_reentrant<R>(&self, f: impl FnOnce() -> R) -> R {
        self.inner.lock_non_reentrant(f)
    }

    /// Runs the callback with this lock locked.
    pub fn lock<R>(&self, f: impl FnOnce() -> R) -> R {
        self.inner.lock(f)
    }
}
// Allow `RawMutex` to back `embassy-sync` blocking mutexes; both supported
// major versions (0.6 and 0.7) are implemented, delegating to the reentrant
// `lock`.
unsafe impl embassy_sync_06::blocking_mutex::raw::RawMutex for RawMutex {
    #[allow(clippy::declare_interior_mutable_const)]
    const INIT: Self = Self::new();

    fn lock<R>(&self, f: impl FnOnce() -> R) -> R {
        self.inner.lock(f)
    }
}

unsafe impl embassy_sync_07::blocking_mutex::raw::RawMutex for RawMutex {
    #[allow(clippy::declare_interior_mutable_const)]
    const INIT: Self = Self::new();

    fn lock<R>(&self, f: impl FnOnce() -> R) -> R {
        self.inner.lock(f)
    }
}
/// A non-reentrant (panicking) mutex.
///
/// This is largely equivalent to a `critical_section::Mutex<RefCell<T>>`, but accessing the inner
/// data doesn't hold a critical section on multi-core systems.
pub struct NonReentrantMutex<T> {
    lock_state: RawMutex,
    data: UnsafeCell<T>,
}

impl<T> NonReentrantMutex<T> {
    /// Create a new instance
    pub const fn new(data: T) -> Self {
        Self {
            lock_state: RawMutex::new(),
            data: UnsafeCell::new(data),
        }
    }

    /// Provide exclusive access to the protected data to the given closure.
    ///
    /// Calling this reentrantly will panic.
    pub fn with<R>(&self, f: impl FnOnce(&mut T) -> R) -> R {
        self.lock_state.lock_non_reentrant(|| {
            // SAFETY: the non-reentrant lock guarantees exclusive access to
            // `data` for the duration of the closure.
            let data = unsafe { &mut *self.data.get() };
            f(data)
        })
    }
}

unsafe impl<T: Send> Send for NonReentrantMutex<T> {}
unsafe impl<T: Send> Sync for NonReentrantMutex<T> {}
/// RAII-style helper: holds a [`GenericRawMutex`] locked and releases it on drop.
struct LockGuard<'a, L: RawLock> {
    lock: &'a GenericRawMutex<L>,
    token: RestoreState,
}

impl<'a, L: RawLock> LockGuard<'a, L> {
    /// Acquires the lock, panicking if the current context already holds it.
    fn new_non_reentrant(lock: &'a GenericRawMutex<L>) -> Self {
        let guard = Self::new_reentrant(lock);
        assert!(!guard.token.is_reentry(), "lock is not reentrant");
        guard
    }

    /// Acquires the lock, permitting a repeated acquisition by the holder.
    fn new_reentrant(lock: &'a GenericRawMutex<L>) -> Self {
        // SAFETY: the same lock is released when the guard is dropped, which
        // happens on the same thread and in reverse acquisition order.
        let token = unsafe { lock.acquire() };

        Self { lock, token }
    }
}

impl<L: RawLock> Drop for LockGuard<'_, L> {
    fn drop(&mut self) {
        // SAFETY: `token` was produced by `acquire` on this very lock.
        unsafe { self.lock.release(self.token) };
    }
}

80
esp-sync/src/raw.rs Normal file
View File

@ -0,0 +1,80 @@
//! Raw lock implementations used to build the synchronization primitives in
//! this crate.
use core::sync::atomic::{Ordering, compiler_fence};
use crate::RestoreState;
/// Trait for single-core locks.
///
/// An implementation acquires some core-local lock and returns a
/// [`RestoreState`] token that its `exit` uses to undo the acquisition.
pub trait RawLock {
    /// Acquires the raw lock
    ///
    /// # Safety
    ///
    /// The returned tokens must be released in reverse order, on the same thread that they were
    /// created on.
    unsafe fn enter(&self) -> RestoreState;

    /// Releases the raw lock
    ///
    /// # Safety
    ///
    /// - The `token` must be created by `self.enter()`
    /// - Tokens must be released in reverse order to their creation, on the same thread that they
    ///   were created on.
    unsafe fn exit(&self, token: RestoreState);
}
/// A lock that disables interrupts.
pub struct SingleCoreInterruptLock;

impl RawLock for SingleCoreInterruptLock {
    unsafe fn enter(&self) -> RestoreState {
        cfg_if::cfg_if! {
            if #[cfg(riscv)] {
                let mut mstatus = 0u32;
                // Atomically clear MIE (bit 3 of mstatus), reading back the
                // previous mstatus value.
                unsafe { core::arch::asm!("csrrci {0}, mstatus, 8", inout(reg) mstatus); }
                // Keep only the previous interrupt-enable bit as restore state.
                let token = mstatus & 0b1000;
            } else if #[cfg(xtensa)] {
                let token: u32;
                // Raise the interrupt level to 5, reading the old PS into `token`.
                unsafe { core::arch::asm!("rsil {0}, 5", out(reg) token); }
            } else {
                compile_error!("Unsupported architecture")
            }
        };

        // Ensure no subsequent memory accesses are reordered to before interrupts are
        // disabled.
        compiler_fence(Ordering::SeqCst);

        // SAFETY: `token` was derived from the architecture state captured
        // above, matching what this lock's `exit` expects.
        unsafe { RestoreState::new(token) }
    }

    unsafe fn exit(&self, token: RestoreState) {
        // Ensure no preceding memory accesses are reordered to after interrupts are
        // enabled.
        compiler_fence(Ordering::SeqCst);

        let token = token.inner();

        cfg_if::cfg_if! {
            if #[cfg(riscv)] {
                // Re-enable interrupts only if they were enabled when `enter` ran.
                if token != 0 {
                    unsafe {
                        riscv::interrupt::enable();
                    }
                }
            } else if #[cfg(xtensa)] {
                // Reserved bits in the PS register, these must be written as 0.
                const RESERVED_MASK: u32 = 0b1111_1111_1111_1000_1111_0000_0000_0000;
                debug_assert!(token & RESERVED_MASK == 0);

                // Restore the saved PS value (and with it the interrupt level).
                unsafe {
                    core::arch::asm!(
                        "wsr.ps {0}",
                        "rsync", in(reg) token)
                }
            } else {
                compile_error!("Unsupported architecture")
            }
        }
    }
}

View File

@ -248,9 +248,10 @@ esp-alloc = { path = "../esp-alloc", optional = true }
esp-bootloader-esp-idf = { path = "../esp-bootloader-esp-idf" }
esp-hal = { path = "../esp-hal" }
esp-hal-embassy = { path = "../esp-hal-embassy", optional = true }
esp-preempt = { path = "../esp-preempt", optional = true }
esp-preempt = { path = "../esp-preempt", optional = true }
esp-storage = { path = "../esp-storage", optional = true }
esp-radio = { path = "../esp-radio", optional = true }
esp-sync = { path = "../esp-sync" }
esp-radio = { path = "../esp-radio", optional = true }
portable-atomic = "1.11.0"
static_cell = { version = "2.1.0" }
semihosting = { version = "0.1", features= ["stdio", "panic-handler"] }

View File

@ -14,8 +14,9 @@ use esp_hal::{
software::{SoftwareInterrupt, SoftwareInterruptControl},
},
peripherals::Peripherals,
sync::{Locked, RawPriorityLimitedMutex},
sync::RawPriorityLimitedMutex,
};
use esp_sync::NonReentrantMutex;
use hil_test as _;
use portable_atomic::{AtomicU32, Ordering};
@ -62,8 +63,8 @@ mod tests {
}
#[test]
fn locked_can_provide_mutable_access() {
let flag = Locked::new(false);
fn non_reentrant_mutex_can_provide_mutable_access() {
let flag = NonReentrantMutex::new(false);
flag.with(|f| {
*f = true;
@ -75,8 +76,8 @@ mod tests {
#[test]
#[should_panic]
fn locked_is_not_reentrant() {
let flag = Locked::new(false);
fn non_reentrant_mutex_is_not_reentrant() {
let flag = NonReentrantMutex::new(false);
flag.with(|_f| {
flag.with(|f| {

View File

@ -57,6 +57,7 @@ pub enum Package {
EspPrintln,
EspRiscvRt,
EspStorage,
EspSync,
EspRadio,
EspRadioPreemptDriver,
EspPreempt,
@ -77,6 +78,7 @@ impl Package {
self,
EspBacktrace
| EspBootloaderEspIdf
| EspAlloc
| EspHal
| EspHalEmbassy
| EspMetadataGenerated
@ -85,6 +87,7 @@ impl Package {
| EspPrintln
| EspPreempt
| EspStorage
| EspSync
| EspRadio
)
}
@ -228,7 +231,12 @@ impl Package {
features.push("coex".to_owned());
}
if features.iter().any(|f| {
f == "csi" || f == "ble" || f == "esp-now" || f == "sniffer" || f == "coex" || f == "ieee802154"
f == "csi"
|| f == "ble"
|| f == "esp-now"
|| f == "sniffer"
|| f == "coex"
|| f == "ieee802154"
}) {
features.push("unstable".to_owned());
}