Make the critical-section impl optional (#3293)

* Make CS optional

* Feature -> config

* Add note about restore state
This commit is contained in:
Dániel Buga 2025-04-02 13:57:54 +02:00 committed by GitHub
parent dca82d60b1
commit 7289b27e65
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
10 changed files with 121 additions and 73 deletions

View File

@ -120,7 +120,7 @@ This will use software-interrupt 3 which isn't available for anything else to wa
// Manual critical section implementation that only masks interrupts handlers.
// We must not acquire the cross-core on dual-core systems because that would
// prevent the other core from doing useful work while this core is sleeping.
let token: critical_section::RawRestoreState;
let token: u32;
unsafe { core::arch::asm!("rsil {0}, 5", out(reg) token) };
// we do not care about race conditions between the load and store operations,

View File

@ -13,6 +13,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
- `ESP_HAL_CONFIG_STACK_GUARD_OFFSET` and `ESP_HAL_CONFIG_STACK_GUARD_VALUE` to configure Rust's [Stack smashing protection](https://doc.rust-lang.org/rustc/exploit-mitigations.html#stack-smashing-protection) (#3203)
- Experimental metadata in the output `.elf` (#3276)
- `PeripheralInput::connect_input_to_peripheral` and `PeripheralOutput::{connect_peripheral_to_output, disconnect_from_peripheral_output}` (#3302)
- `ESP_HAL_CONFIG_CRITICAL_SECTION_IMPL` to allow opting out of the default `critical-section` implementation (#3293)
### Changed
@ -23,6 +24,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
- `SpiDma<Async>` now uses the SPI interrupt (instead of DMA) to wait for completion (#3303)
- `gpio::interconnect` types now have a lifetime associated with them (#3302)
- The `critical-section` implementation is now gated behind the `critical-section-impl` feature (#3293)
### Fixed

View File

@ -21,7 +21,7 @@ bitflags = "2.8.0"
bytemuck = "1.21.0"
bitfield = "0.18.1"
cfg-if = "1.0.0"
critical-section = "1.2.0"
critical-section = { version = "1.2.0", features = ["restore-state-u32"] }
defmt = { version = "0.3.10", optional = true }
delegate = "0.13.2"
digest = { version = "0.10.7", default-features = false, optional = true }
@ -68,12 +68,10 @@ esp32s3 = { version = "0.31.0", features = ["critical-section", "rt"], optional
[target.'cfg(target_arch = "riscv32")'.dependencies]
riscv = { version = "0.12.1" }
esp-riscv-rt = { version = "0.10.0", path = "../esp-riscv-rt" }
critical-section = { version = "1.2.0", features = ["restore-state-u8"] }
[target.'cfg(target_arch = "xtensa")'.dependencies]
xtensa-lx = { version = "0.10.0", path = "../xtensa-lx" }
xtensa-lx-rt = { version = "0.18.0", path = "../xtensa-lx-rt" }
critical-section = { version = "1.2.0", features = ["restore-state-u32"] }
[build-dependencies]
basic-toml = "0.1.9"

View File

@ -51,7 +51,7 @@ fn main() -> Result<(), Box<dyn Error>> {
panic!("
Seems you are building for an unsupported or wrong target (e.g. the host environment).
Maybe you are missing the `target` in `.cargo/config.toml` or you have configs overriding it?
See https://doc.rust-lang.org/cargo/reference/config.html#hierarchical-structure
");
}
@ -145,7 +145,13 @@ fn main() -> Result<(), Box<dyn Error>> {
"The value to be written to the stack guard variable.",
Value::Integer(0xDEED_BAAD),
None
)
),
(
"impl-critical-section",
"Provide a `critical-section` implementation. Note that if disabled, you will need to provide a `critical-section` implementation which is using `critical-section/restore-state-u32`.",
Value::Bool(true),
None
),
], true);
// RISC-V and Xtensa devices each require some special handling and processing

View File

@ -169,10 +169,10 @@ impl Priority {
}
}
impl TryFrom<u8> for Priority {
impl TryFrom<u32> for Priority {
type Error = Error;
fn try_from(value: u8) -> Result<Self, Self::Error> {
fn try_from(value: u32) -> Result<Self, Self::Error> {
match value {
0 => Ok(Priority::None),
1 => Ok(Priority::Priority1),
@ -195,6 +195,14 @@ impl TryFrom<u8> for Priority {
}
}
impl TryFrom<u8> for Priority {
type Error = Error;
fn try_from(value: u8) -> Result<Self, Self::Error> {
Priority::try_from(value as u32)
}
}
/// The interrupts reserved by the HAL
#[cfg_attr(place_switch_tables_in_ram, link_section = ".rwtext")]
pub static RESERVED_INTERRUPTS: &[usize] = PRIORITY_TO_INTERRUPT;

View File

@ -394,10 +394,10 @@ mod vectored {
}
}
impl TryFrom<u8> for Priority {
impl TryFrom<u32> for Priority {
type Error = Error;
fn try_from(value: u8) -> Result<Self, Self::Error> {
fn try_from(value: u32) -> Result<Self, Self::Error> {
match value {
0 => Ok(Priority::None),
1 => Ok(Priority::Priority1),
@ -408,6 +408,14 @@ mod vectored {
}
}
impl TryFrom<u8> for Priority {
type Error = Error;
fn try_from(value: u8) -> Result<Self, Self::Error> {
Self::try_from(value as u32)
}
}
impl CpuInterrupt {
#[inline]
fn level(&self) -> Priority {

View File

@ -7,15 +7,58 @@ use core::cell::UnsafeCell;
use crate::interrupt::Priority;
/// Opaque token that can be used to release a lock.
// The interpretation of this value depends on the lock type that created it,
// but bit #31 is reserved for the reentry flag.
//
// Xtensa: PS has 15 useful bits. Bits 12..16 and 19..32 are unused, so we can
// use bit #31 as our reentry flag.
// We can assume the reserved bit is 0 otherwise rsil - wsr pairings would be
// undefined behavior: Quoting the ISA summary, table 64:
// Writing a non-zero value to these fields results in undefined processor
// behavior.
//
// Risc-V: we either get the restore state from bit 3 of mstatus, or
// we create the restore state from the current Priority, which is at most 31.
#[derive(Clone, Copy, Debug)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub struct RestoreState(u32);

impl RestoreState {
    /// Bit #31 of the token marks a re-entrant lock acquisition.
    const REENTRY_FLAG: u32 = 1 << 31;

    /// Tags this token as having been taken while the lock was already held.
    fn mark_reentry(&mut self) {
        self.0 = self.0 | Self::REENTRY_FLAG;
    }

    /// Returns whether this token was produced by a re-entrant acquisition.
    fn is_reentry(&self) -> bool {
        (self.0 & Self::REENTRY_FLAG) == Self::REENTRY_FLAG
    }
}
impl From<Priority> for RestoreState {
fn from(priority: Priority) -> Self {
Self(priority as _)
}
}
impl TryFrom<RestoreState> for Priority {
type Error = crate::interrupt::Error;
fn try_from(token: RestoreState) -> Result<Self, Self::Error> {
Self::try_from(token.0)
}
}
mod single_core {
use core::sync::atomic::{compiler_fence, Ordering};
use super::RestoreState;
use crate::interrupt::Priority;
/// Trait for single-core locks.
pub trait RawLock {
unsafe fn enter(&self) -> critical_section::RawRestoreState;
unsafe fn exit(&self, token: critical_section::RawRestoreState);
unsafe fn enter(&self) -> RestoreState;
unsafe fn exit(&self, token: RestoreState);
}
/// A lock that disables interrupts below a certain priority.
@ -34,7 +77,7 @@ mod single_core {
}
impl RawLock for PriorityLock {
unsafe fn enter(&self) -> critical_section::RawRestoreState {
unsafe fn enter(&self) -> RestoreState {
#[cfg(riscv)]
if self.0 == Priority::max() {
return InterruptLock.enter();
@ -47,10 +90,10 @@ mod single_core {
// disabled.
compiler_fence(Ordering::SeqCst);
prev_interrupt_priority as _
RestoreState::from(prev_interrupt_priority)
}
unsafe fn exit(&self, token: critical_section::RawRestoreState) {
unsafe fn exit(&self, token: RestoreState) {
#[cfg(riscv)]
if self.0 == Priority::max() {
return InterruptLock.exit(token);
@ -60,9 +103,6 @@ mod single_core {
// enabled.
compiler_fence(Ordering::SeqCst);
#[cfg(xtensa)]
let token = token as u8;
let priority = unwrap!(Priority::try_from(token));
unsafe { Self::change_current_level(priority) };
}
@ -72,14 +112,14 @@ mod single_core {
pub struct InterruptLock;
impl RawLock for InterruptLock {
unsafe fn enter(&self) -> critical_section::RawRestoreState {
unsafe fn enter(&self) -> RestoreState {
cfg_if::cfg_if! {
if #[cfg(riscv)] {
let mut mstatus = 0u32;
core::arch::asm!("csrrci {0}, mstatus, 8", inout(reg) mstatus);
let token = ((mstatus & 0b1000) != 0) as critical_section::RawRestoreState;
let token = mstatus & 0b1000;
} else if #[cfg(xtensa)] {
let token: critical_section::RawRestoreState;
let token: u32;
core::arch::asm!("rsil {0}, 5", out(reg) token);
} else {
compile_error!("Unsupported architecture")
@ -90,14 +130,16 @@ mod single_core {
// disabled.
compiler_fence(Ordering::SeqCst);
token
RestoreState(token)
}
unsafe fn exit(&self, token: critical_section::RawRestoreState) {
unsafe fn exit(&self, token: RestoreState) {
// Ensure no preceding memory accesses are reordered to after interrupts are
// enabled.
compiler_fence(Ordering::SeqCst);
let RestoreState(token) = token;
cfg_if::cfg_if! {
if #[cfg(riscv)] {
if token != 0 {
@ -172,21 +214,6 @@ mod multicore {
}
}
cfg_if::cfg_if! {
if #[cfg(riscv)] {
// The restore state is a u8 that is casted from a bool, so it has a value of
// 0x00 or 0x01 before we add the reentry flag to it.
pub const REENTRY_FLAG: u8 = 1 << 7;
} else if #[cfg(xtensa)] {
// PS has 15 useful bits. Bits 12..16 and 19..32 are unused, so we can use bit
// #31 as our reentry flag.
// We can assume the reserved bit is 0 otherwise rsil - wsr pairings would be
// undefined behavior: Quoting the ISA summary, table 64:
// Writing a non-zero value to these fields results in undefined processor behavior.
pub const REENTRY_FLAG: u32 = 1 << 31;
}
}
/// A generic lock that wraps [`single_core::RawLock`] and
/// [`multicore::AtomicLock`] and tracks whether the caller has locked
/// recursively.
@ -220,13 +247,13 @@ impl<L: single_core::RawLock> GenericRawMutex<L> {
/// - The returned token must be passed to the corresponding `release` call.
/// - The caller must ensure to release the locks in the reverse order they
/// were acquired.
unsafe fn acquire(&self) -> critical_section::RawRestoreState {
unsafe fn acquire(&self) -> RestoreState {
cfg_if::cfg_if! {
if #[cfg(single_core)] {
let mut tkn = unsafe { self.lock.enter() };
let was_locked = self.is_locked.replace(true);
if was_locked {
tkn |= REENTRY_FLAG;
tkn.mark_reentry();
}
tkn
} else if #[cfg(multi_core)] {
@ -244,7 +271,7 @@ impl<L: single_core::RawLock> GenericRawMutex<L> {
match self.inner.try_lock(current_thread_id) {
Ok(()) => Some(tkn),
Err(owner) if owner == current_thread_id => {
tkn |= REENTRY_FLAG;
tkn.mark_reentry();
Some(tkn)
}
Err(_) => {
@ -273,8 +300,8 @@ impl<L: single_core::RawLock> GenericRawMutex<L> {
/// - The caller must ensure to release the locks in the reverse order they
/// were acquired.
/// - Each release call must be paired with an acquire call.
unsafe fn release(&self, token: critical_section::RawRestoreState) {
if token & REENTRY_FLAG == 0 {
unsafe fn release(&self, token: RestoreState) {
if !token.is_reentry() {
#[cfg(multi_core)]
self.inner.unlock();
@ -329,7 +356,7 @@ impl RawMutex {
/// - The returned token must be passed to the corresponding `release` call.
/// - The caller must ensure to release the locks in the reverse order they
/// were acquired.
pub unsafe fn acquire(&self) -> critical_section::RawRestoreState {
pub unsafe fn acquire(&self) -> RestoreState {
self.inner.acquire()
}
@ -342,7 +369,7 @@ impl RawMutex {
/// - The caller must ensure to release the locks in the reverse order they
/// were acquired.
/// - Each release call must be paired with an acquire call.
pub unsafe fn release(&self, token: critical_section::RawRestoreState) {
pub unsafe fn release(&self, token: RestoreState) {
self.inner.release(token);
}
@ -437,31 +464,15 @@ impl<T> Locked<T> {
unsafe impl<T> Sync for Locked<T> {}
struct CriticalSection;
critical_section::set_impl!(CriticalSection);
static CRITICAL_SECTION: RawMutex = RawMutex::new();
unsafe impl critical_section::Impl for CriticalSection {
unsafe fn acquire() -> critical_section::RawRestoreState {
CRITICAL_SECTION.acquire()
}
unsafe fn release(token: critical_section::RawRestoreState) {
CRITICAL_SECTION.release(token);
}
}
struct LockGuard<'a, L: single_core::RawLock> {
lock: &'a GenericRawMutex<L>,
token: critical_section::RawRestoreState,
token: RestoreState,
}
impl<'a, L: single_core::RawLock> LockGuard<'a, L> {
fn new(lock: &'a GenericRawMutex<L>) -> Self {
let this = Self::new_reentrant(lock);
assert!(this.token & REENTRY_FLAG == 0, "lock is not reentrant");
assert!(!this.token.is_reentry(), "lock is not reentrant");
this
}
@ -482,3 +493,22 @@ impl<L: single_core::RawLock> Drop for LockGuard<'_, L> {
unsafe { self.lock.release(self.token) };
}
}
// Default `critical-section` implementation, gated behind the
// `ESP_HAL_CONFIG_CRITICAL_SECTION_IMPL` config option so users can opt out
// and provide their own (which must use `critical-section/restore-state-u32`,
// since the raw token exchanged below is the `u32` inside `RestoreState`).
#[cfg(impl_critical_section)]
mod critical_section {
struct CriticalSection;

// Registers `CriticalSection` as the global `critical-section` provider.
critical_section::set_impl!(CriticalSection);

// Single global mutex backing all `critical_section::with` calls.
static CRITICAL_SECTION: super::RawMutex = super::RawMutex::new();

unsafe impl critical_section::Impl for CriticalSection {
unsafe fn acquire() -> critical_section::RawRestoreState {
// Expose the raw `u32` of the `RestoreState` token to the
// `critical-section` crate (RawRestoreState is u32 here).
CRITICAL_SECTION.acquire().0
}

unsafe fn release(token: critical_section::RawRestoreState) {
// Re-wrap the raw token before handing it back to the mutex.
CRITICAL_SECTION.release(super::RestoreState(token));
}
}
}

View File

@ -86,10 +86,6 @@ extern "C" fn notify_host_recv(data: *mut u8, len: u16) -> i32 {
0
}
#[cfg(target_arch = "riscv32")]
type InterruptsFlagType = u8;
#[cfg(target_arch = "xtensa")]
type InterruptsFlagType = u32;
static mut G_INTER_FLAGS: [InterruptsFlagType; 10] = [0; 10];

View File

@ -617,15 +617,14 @@ unsafe extern "C" fn ble_npl_get_time_forever() -> u32 {
unsafe extern "C" fn ble_npl_hw_exit_critical(mask: u32) {
trace!("ble_npl_hw_exit_critical {}", mask);
critical_section::release(core::mem::transmute::<u8, critical_section::RestoreState>(
mask as u8,
critical_section::release(core::mem::transmute::<u32, critical_section::RestoreState>(
mask,
));
}
unsafe extern "C" fn ble_npl_hw_enter_critical() -> u32 {
trace!("ble_npl_hw_enter_critical");
let as_u8: u8 = core::mem::transmute(critical_section::acquire());
as_u8 as u32
core::mem::transmute::<critical_section::RestoreState, u32>(critical_section::acquire())
}
unsafe extern "C" fn ble_npl_hw_set_isr(_no: i32, _mask: u32) {

View File

@ -223,7 +223,8 @@ pub unsafe extern "C" fn wifi_int_disable(
) -> u32 {
trace!("wifi_int_disable");
// TODO: can we use wifi_int_mux?
unsafe { WIFI_LOCK.acquire() as _ }
let token = unsafe { WIFI_LOCK.acquire() };
unsafe { core::mem::transmute::<esp_hal::sync::RestoreState, u32>(token) }
}
/// **************************************************************************
@ -246,7 +247,7 @@ pub unsafe extern "C" fn wifi_int_restore(
tmp: u32,
) {
trace!("wifi_int_restore");
let token = tmp as critical_section::RawRestoreState;
let token = unsafe { core::mem::transmute::<u32, esp_hal::sync::RestoreState>(tmp) };
unsafe { WIFI_LOCK.release(token) }
}