Add priority-limited locks (#2684)

* Add priority-limited locks

* Rename to RawMutex, only provide lock() publicly

* Explode implementation and move into the interrupt module
Dániel Buga 2024-12-16 09:39:42 +01:00 committed by GitHub
parent 3a03dd88c7
commit bc0bedd628
9 changed files with 481 additions and 106 deletions


@@ -138,7 +138,15 @@ This will use software-interrupt 3 which isn't available for anything else to wa
// sections in Xtensa are implemented via increasing `PS.INTLEVEL`.
// The critical section ends here. Take care not to add code after
// `waiti` if it needs to be inside the CS.
unsafe { core::arch::asm!("waiti 0") };
// Do not lower INTLEVEL below the current value.
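// `waiti` takes its level as an immediate operand, so the runtime value is
// dispatched to a per-level instruction.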
match token & 0x0F {
0 => unsafe { core::arch::asm!("waiti 0") },
1 => unsafe { core::arch::asm!("waiti 1") },
2 => unsafe { core::arch::asm!("waiti 2") },
3 => unsafe { core::arch::asm!("waiti 3") },
4 => unsafe { core::arch::asm!("waiti 4") },
_ => unsafe { core::arch::asm!("waiti 5") },
}
}
// If this races and some waker sets the signal, we'll reset it, but still poll.
SIGNAL_WORK_THREAD_MODE[cpu].store(false, Ordering::Relaxed);
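For reference, the `token` consumed above carries the current `PS.INTLEVEL` in its low four bits. A minimal sketch of how such a value can be read (Xtensa only; the helper name is hypothetical — in this PR the token comes from the lock bookkeeping):

```rust
fn current_intlevel() -> u32 {
    let ps: u32;
    // PS.INTLEVEL occupies bits 0..=3 of the PS special register.
    unsafe { core::arch::asm!("rsr.ps {0}", out(reg) ps) };
    ps & 0x0F
}
```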


@@ -60,6 +60,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
- Dropped GPIO futures stop listening for interrupts (#2625)
- UART driver's `StopBits` enum variants now correctly use UpperCamelCase (#2669)
- The `PeripheralInput` and `PeripheralOutput` traits are now sealed (#2690)
- `esp_hal::sync::Lock` has been renamed to `RawMutex` (#2684)
### Fixed


@@ -117,7 +117,7 @@ pub enum CpuInterrupt {
}
/// Interrupt priority levels.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[repr(u8)]
pub enum Priority {
@@ -167,6 +167,32 @@ impl Priority {
}
}
impl TryFrom<u8> for Priority {
type Error = Error;
fn try_from(value: u8) -> Result<Self, Self::Error> {
match value {
0 => Ok(Priority::None),
1 => Ok(Priority::Priority1),
2 => Ok(Priority::Priority2),
3 => Ok(Priority::Priority3),
4 => Ok(Priority::Priority4),
5 => Ok(Priority::Priority5),
6 => Ok(Priority::Priority6),
7 => Ok(Priority::Priority7),
8 => Ok(Priority::Priority8),
9 => Ok(Priority::Priority9),
10 => Ok(Priority::Priority10),
11 => Ok(Priority::Priority11),
12 => Ok(Priority::Priority12),
13 => Ok(Priority::Priority13),
14 => Ok(Priority::Priority14),
15 => Ok(Priority::Priority15),
_ => Err(Error::InvalidInterruptPriority),
}
}
}
/// The interrupts reserved by the HAL
#[cfg_attr(place_switch_tables_in_ram, link_section = ".rwtext")]
pub static RESERVED_INTERRUPTS: &[usize] = PRIORITY_TO_INTERRUPT;
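A quick usage sketch for the new conversion (assumes the 16-level `Priority` enum shown here):

```rust
use esp_hal::interrupt::Priority;

fn demo() {
    // In-range values map to the matching variant...
    assert!(matches!(Priority::try_from(3u8), Ok(Priority::Priority3)));
    // ...anything above 15 is rejected.
    assert!(Priority::try_from(16u8).is_err());
}
```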
@@ -681,6 +707,34 @@ mod classic {
let intr = &*crate::peripherals::INTERRUPT_CORE0::PTR;
intr.cpu_int_thresh().write(|w| w.bits(stored_prio));
}
/// Get the current run level (the level at or below which interrupts are masked).
pub(crate) fn current_runlevel() -> Priority {
let intr = unsafe { crate::peripherals::INTERRUPT_CORE0::steal() };
let prev_interrupt_priority = intr.cpu_int_thresh().read().bits().saturating_sub(1) as u8;
unwrap!(Priority::try_from(prev_interrupt_priority))
}
/// Changes the current run level (the level at or below which interrupts are
/// masked), and returns the previous run level.
///
/// # Safety
///
/// This function must only be used to raise the runlevel and to restore it
/// to a previous value. It must not be used to arbitrarily lower the
/// runlevel.
pub(crate) unsafe fn change_current_runlevel(level: Priority) -> Priority {
let prev_interrupt_priority = current_runlevel();
// The CPU responds to interrupts `>= level`, but we also want to disable
// interrupts at `level`, so we set the threshold to `level + 1`.
crate::peripherals::INTERRUPT_CORE0::steal()
.cpu_int_thresh()
.write(|w| w.bits(level as u32 + 1));
prev_interrupt_priority
}
}
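The intended call pattern follows from the safety contract: raise the runlevel, do the work, restore. A sketch (illustrative only, since `change_current_runlevel` is `pub(crate)`):

```rust
// Illustrative only: wraps a closure in a raised runlevel.
unsafe fn with_interrupts_masked_up_to(level: Priority, work: impl FnOnce()) {
    // Raise the runlevel: interrupts at or below `level` are now masked.
    let prev = change_current_runlevel(level);
    work();
    // Restore the previous runlevel; never lower it past its starting value.
    change_current_runlevel(prev);
}
```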
#[cfg(plic)]
@@ -817,4 +871,36 @@ mod plic {
plic.mxint_thresh()
.write(|w| w.cpu_mxint_thresh().bits(stored_prio as u8));
}
/// Get the current run level (the level at or below which interrupts are masked).
pub(crate) fn current_runlevel() -> Priority {
let prev_interrupt_priority = unsafe { crate::peripherals::PLIC_MX::steal() }
.mxint_thresh()
.read()
.cpu_mxint_thresh()
.bits()
.saturating_sub(1);
unwrap!(Priority::try_from(prev_interrupt_priority))
}
/// Changes the current run level (the level at or below which interrupts are
/// masked), and returns the previous run level.
///
/// # Safety
///
/// This function must only be used to raise the runlevel and to restore it
/// to a previous value. It must not be used to arbitrarily lower the
/// runlevel.
pub(crate) unsafe fn change_current_runlevel(level: Priority) -> Priority {
let prev_interrupt_priority = current_runlevel();
// The CPU responds to interrupts `>= level`, but we also want to disable
// interrupts at `level`, so we set the threshold to `level + 1`.
crate::peripherals::PLIC_MX::steal()
.mxint_thresh()
.write(|w| w.cpu_mxint_thresh().bits(level as u8 + 1));
prev_interrupt_priority
}
}


@@ -334,13 +334,45 @@ unsafe fn core1_interrupt_peripheral() -> *const crate::peripherals::interrupt_c
crate::peripherals::INTERRUPT_CORE1::PTR
}
/// Get the current run level (the level at or below which interrupts are masked).
pub(crate) fn current_runlevel() -> Priority {
let ps: u32;
unsafe { core::arch::asm!("rsr.ps {0}", out(reg) ps) };
let prev_interrupt_priority = ps as u8 & 0x0F;
unwrap!(Priority::try_from(prev_interrupt_priority))
}
/// Changes the current run level (the level at or below which interrupts are
/// masked), and returns the previous run level.
///
/// # Safety
///
/// This function must only be used to raise the runlevel and to restore it
/// to a previous value. It must not be used to arbitrarily lower the
/// runlevel.
pub(crate) unsafe fn change_current_runlevel(level: Priority) -> Priority {
let token: u32;
match level {
Priority::None => core::arch::asm!("rsil {0}, 0", out(reg) token),
Priority::Priority1 => core::arch::asm!("rsil {0}, 1", out(reg) token),
Priority::Priority2 => core::arch::asm!("rsil {0}, 2", out(reg) token),
Priority::Priority3 => core::arch::asm!("rsil {0}, 3", out(reg) token),
};
let prev_interrupt_priority = token as u8 & 0x0F;
unwrap!(Priority::try_from(prev_interrupt_priority))
}
mod vectored {
use procmacros::ram;
use super::*;
/// Interrupt priority levels.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[repr(u8)]
pub enum Priority {
@@ -366,6 +398,20 @@ mod vectored {
}
}
impl TryFrom<u8> for Priority {
type Error = Error;
fn try_from(value: u8) -> Result<Self, Self::Error> {
match value {
0 => Ok(Priority::None),
1 => Ok(Priority::Priority1),
2 => Ok(Priority::Priority2),
3 => Ok(Priority::Priority3),
_ => Err(Error::InvalidInterrupt),
}
}
}
impl CpuInterrupt {
#[inline]
fn level(&self) -> Priority {


@@ -1,52 +1,109 @@
//! Under construction: This is public only for tests, please avoid using it.
//! Under construction: This is public only for tests, please avoid using it
//! directly.
#[cfg(single_core)]
use core::cell::Cell;
use core::cell::UnsafeCell;
use crate::interrupt::Priority;
mod single_core {
    use core::sync::atomic::{compiler_fence, Ordering};

    pub unsafe fn disable_interrupts() -> critical_section::RawRestoreState {
        cfg_if::cfg_if! {
            if #[cfg(riscv)] {
                let mut mstatus = 0u32;
                core::arch::asm!("csrrci {0}, mstatus, 8", inout(reg) mstatus);
                let token = ((mstatus & 0b1000) != 0) as critical_section::RawRestoreState;
            } else if #[cfg(xtensa)] {
                let token: critical_section::RawRestoreState;
                core::arch::asm!("rsil {0}, 5", out(reg) token);
            } else {
                compile_error!("Unsupported architecture")
            }
        };

        // Ensure no subsequent memory accesses are reordered to before interrupts are
        // disabled.
        compiler_fence(Ordering::SeqCst);

        token
    }

    pub unsafe fn reenable_interrupts(token: critical_section::RawRestoreState) {
        // Ensure no preceding memory accesses are reordered to after interrupts are
        // enabled.
        compiler_fence(Ordering::SeqCst);

        cfg_if::cfg_if! {
            if #[cfg(riscv)] {
                if token != 0 {
                    esp_riscv_rt::riscv::interrupt::enable();
                }
            } else if #[cfg(xtensa)] {
                // Reserved bits in the PS register, these must be written as 0.
                const RESERVED_MASK: u32 = 0b1111_1111_1111_1000_1111_0000_0000_0000;
                debug_assert!(token & RESERVED_MASK == 0);
                core::arch::asm!(
                    "wsr.ps {0}",
                    "rsync", in(reg) token)
            } else {
                compile_error!("Unsupported architecture")
            }
        }
    }

    use crate::interrupt::Priority;

    /// Trait for single-core locks.
    pub trait RawLock {
        unsafe fn enter(&self) -> critical_section::RawRestoreState;
        unsafe fn exit(&self, token: critical_section::RawRestoreState);
    }

    /// A lock that disables interrupts at or below a certain priority.
    pub struct PriorityLock(pub Priority);

    impl PriorityLock {
        fn current_priority() -> Priority {
            crate::interrupt::current_runlevel()
        }

        /// Masks interrupts at or below `level` and returns the
        /// previous run level.
        unsafe fn change_current_level(level: Priority) -> Priority {
            crate::interrupt::change_current_runlevel(level)
        }
    }

    impl RawLock for PriorityLock {
        unsafe fn enter(&self) -> critical_section::RawRestoreState {
            let prev_interrupt_priority = unsafe { Self::change_current_level(self.0) };
            assert!(prev_interrupt_priority <= self.0);

            // Ensure no subsequent memory accesses are reordered to before interrupts are
            // disabled.
            compiler_fence(Ordering::SeqCst);

            prev_interrupt_priority as _
        }

        unsafe fn exit(&self, token: critical_section::RawRestoreState) {
            assert!(Self::current_priority() <= self.0);
            // Ensure no preceding memory accesses are reordered to after interrupts are
            // enabled.
            compiler_fence(Ordering::SeqCst);

            #[cfg(xtensa)]
            let token = token as u8;
            let priority = unwrap!(Priority::try_from(token));
            unsafe { Self::change_current_level(priority) };
        }
    }

    /// A lock that disables interrupts.
    pub struct InterruptLock;

    impl RawLock for InterruptLock {
        unsafe fn enter(&self) -> critical_section::RawRestoreState {
            cfg_if::cfg_if! {
                if #[cfg(riscv)] {
                    let mut mstatus = 0u32;
                    core::arch::asm!("csrrci {0}, mstatus, 8", inout(reg) mstatus);
                    let token = ((mstatus & 0b1000) != 0) as critical_section::RawRestoreState;
                } else if #[cfg(xtensa)] {
                    let token: critical_section::RawRestoreState;
                    core::arch::asm!("rsil {0}, 5", out(reg) token);
                } else {
                    compile_error!("Unsupported architecture")
                }
            };

            // Ensure no subsequent memory accesses are reordered to before interrupts are
            // disabled.
            compiler_fence(Ordering::SeqCst);

            token
        }

        unsafe fn exit(&self, token: critical_section::RawRestoreState) {
            // Ensure no preceding memory accesses are reordered to after interrupts are
            // enabled.
            compiler_fence(Ordering::SeqCst);
            cfg_if::cfg_if! {
                if #[cfg(riscv)] {
                    if token != 0 {
                        esp_riscv_rt::riscv::interrupt::enable();
                    }
                } else if #[cfg(xtensa)] {
                    // Reserved bits in the PS register, these must be written as 0.
                    const RESERVED_MASK: u32 = 0b1111_1111_1111_1000_1111_0000_0000_0000;
                    debug_assert!(token & RESERVED_MASK == 0);
                    core::arch::asm!(
                        "wsr.ps {0}",
                        "rsync", in(reg) token)
                } else {
                    compile_error!("Unsupported architecture")
                }
            }
        }
    }
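To illustrate the abstraction (a sketch, not part of the PR): any type implementing `RawLock` can back the generic mutex introduced below. A deliberately trivial implementor:

```rust
/// A no-op lock; it masks nothing and exists only to show the trait's shape.
struct NoopLock;

impl RawLock for NoopLock {
    unsafe fn enter(&self) -> critical_section::RawRestoreState {
        // Nothing was disabled, so there is no state to remember.
        Default::default()
    }

    unsafe fn exit(&self, _token: critical_section::RawRestoreState) {}
}
```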
@@ -121,26 +178,24 @@ cfg_if::cfg_if! {
}
}
/// A lock that can be used to protect shared resources.
pub struct Lock {
/// A generic lock that wraps [`single_core::RawLock`] and
/// [`multicore::AtomicLock`] and tracks whether the caller has locked
/// recursively.
struct GenericRawMutex<L: single_core::RawLock> {
lock: L,
#[cfg(multi_core)]
inner: multicore::AtomicLock,
#[cfg(single_core)]
is_locked: Cell<bool>,
}
unsafe impl Sync for Lock {}
unsafe impl<L: single_core::RawLock> Sync for GenericRawMutex<L> {}
impl Default for Lock {
fn default() -> Self {
Self::new()
}
}
impl Lock {
impl<L: single_core::RawLock> GenericRawMutex<L> {
/// Create a new lock.
pub const fn new() -> Self {
pub const fn new(lock: L) -> Self {
Self {
lock,
#[cfg(multi_core)]
inner: multicore::AtomicLock::new(),
#[cfg(single_core)]
@@ -156,10 +211,10 @@ impl Lock {
/// - The returned token must be passed to the corresponding `release` call.
/// - The caller must ensure to release the locks in the reverse order they
/// were acquired.
pub unsafe fn acquire(&self) -> critical_section::RawRestoreState {
unsafe fn acquire(&self) -> critical_section::RawRestoreState {
cfg_if::cfg_if! {
if #[cfg(single_core)] {
let mut tkn = unsafe { single_core::disable_interrupts() };
let mut tkn = unsafe { self.lock.enter() };
let was_locked = self.is_locked.replace(true);
if was_locked {
tkn |= REENTRY_FLAG;
@@ -175,7 +230,7 @@ impl Lock {
// context with the same `current_thread_id`, so it would be allowed to lock the
// resource in a theoretically incorrect way.
let try_lock = |current_thread_id| {
let mut tkn = unsafe { single_core::disable_interrupts() };
let mut tkn = unsafe { self.lock.enter() };
match self.inner.try_lock(current_thread_id) {
Ok(()) => Some(tkn),
@@ -184,7 +239,7 @@ impl Lock {
Some(tkn)
}
Err(_) => {
unsafe { single_core::reenable_interrupts(tkn) };
unsafe { self.lock.exit(tkn) };
None
}
}
@@ -209,7 +264,7 @@ impl Lock {
/// - The caller must ensure to release the locks in the reverse order they
/// were acquired.
/// - Each release call must be paired with an acquire call.
pub unsafe fn release(&self, token: critical_section::RawRestoreState) {
unsafe fn release(&self, token: critical_section::RawRestoreState) {
if token & REENTRY_FLAG == 0 {
#[cfg(multi_core)]
self.inner.unlock();
@@ -217,25 +272,140 @@ impl Lock {
#[cfg(single_core)]
self.is_locked.set(false);
single_core::reenable_interrupts(token);
self.lock.exit(token)
}
}
/// Runs the callback with this lock locked.
///
/// Note that this function is not reentrant; calling it reentrantly will
/// panic.
pub fn lock<R>(&self, f: impl FnOnce() -> R) -> R {
let _token = LockGuard::new(self);
f()
}
}
/// A mutual exclusion primitive.
///
/// This lock disables interrupts on the current core while locked.
#[cfg_attr(
multi_core,
doc = r#"It needs a bit of memory, but it does not take a global critical
section, making it preferable for use in multi-core systems."#
)]
pub struct RawMutex {
inner: GenericRawMutex<single_core::InterruptLock>,
}
impl Default for RawMutex {
fn default() -> Self {
Self::new()
}
}
impl RawMutex {
/// Create a new lock.
pub const fn new() -> Self {
Self {
inner: GenericRawMutex::new(single_core::InterruptLock),
}
}
/// Acquires the lock.
///
/// # Safety
///
/// - Each release call must be paired with an acquire call.
/// - The returned token must be passed to the corresponding `release` call.
/// - The caller must ensure to release the locks in the reverse order they
/// were acquired.
pub unsafe fn acquire(&self) -> critical_section::RawRestoreState {
self.inner.acquire()
}
/// Releases the lock.
///
/// # Safety
///
/// - This function must only be called if the lock was acquired by the
/// current thread.
/// - The caller must ensure to release the locks in the reverse order they
/// were acquired.
/// - Each release call must be paired with an acquire call.
pub unsafe fn release(&self, token: critical_section::RawRestoreState) {
self.inner.release(token);
}
/// Runs the callback with this lock locked.
///
/// Note that this function is not reentrant; calling it reentrantly will
/// panic.
pub fn lock<R>(&self, f: impl FnOnce() -> R) -> R {
self.inner.lock(f)
}
}
unsafe impl embassy_sync::blocking_mutex::raw::RawMutex for RawMutex {
#[allow(clippy::declare_interior_mutable_const)]
const INIT: Self = Self::new();
fn lock<R>(&self, f: impl FnOnce() -> R) -> R {
// embassy_sync semantics allow reentrancy.
let _token = LockGuard::new_reentrant(&self.inner);
f()
}
}
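Usage keeps the same shape as the old `Lock` (a sketch):

```rust
use esp_hal::sync::RawMutex;

static LOCK: RawMutex = RawMutex::new();

fn exclusive_section() {
    // Interrupts on this core are disabled (and, on multi-core chips, the
    // spinlock is held) for the duration of the closure.
    LOCK.lock(|| {
        // ... touch data shared with interrupt handlers ...
    });
}
```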
/// A mutual exclusion primitive that only disables a limited range of
/// interrupts.
///
/// Trying to acquire or release the lock at a higher priority level will panic.
pub struct RawPriorityLimitedMutex {
inner: GenericRawMutex<single_core::PriorityLock>,
}
impl RawPriorityLimitedMutex {
/// Create a new lock that is accessible at or below the given `priority`.
pub const fn new(priority: Priority) -> Self {
Self {
inner: GenericRawMutex::new(single_core::PriorityLock(priority)),
}
}
/// Runs the callback with this lock locked.
///
/// Note that this function is not reentrant; calling it reentrantly will
/// panic.
pub fn lock<R>(&self, f: impl FnOnce() -> R) -> R {
self.inner.lock(f)
}
}
unsafe impl embassy_sync::blocking_mutex::raw::RawMutex for RawPriorityLimitedMutex {
#[allow(clippy::declare_interior_mutable_const)]
const INIT: Self = Self::new(Priority::max());
fn lock<R>(&self, f: impl FnOnce() -> R) -> R {
// embassy_sync semantics allow reentrancy.
let _token = LockGuard::new_reentrant(&self.inner);
f()
}
}
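A sketch of the new priority-limited mutex in use:

```rust
use esp_hal::{interrupt::Priority, sync::RawPriorityLimitedMutex};

static MUTEX: RawPriorityLimitedMutex = RawPriorityLimitedMutex::new(Priority::Priority2);

fn update_shared_state() {
    // Masks interrupts at or below Priority2. Higher-priority handlers keep
    // running -- but they must not call `lock`, which would panic.
    MUTEX.lock(|| {
        // ... critical work ...
    });
}
```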
// Prefer this over a critical-section, as it allows multiple locks to be
// active at the same time instead of serializing everything through the
// single global critical-section mutex.
pub(crate) fn lock<T>(lock: &Lock, f: impl FnOnce() -> T) -> T {
let _token = LockGuard::new(lock);
f()
pub(crate) fn lock<T>(lock: &RawMutex, f: impl FnOnce() -> T) -> T {
lock.lock(f)
}
/// Data protected by a [Lock].
/// Data protected by a [RawMutex].
///
/// This is largely equivalent to a `Mutex<RefCell<T>>`, but accessing the inner
/// data doesn't hold a critical section on multi-core systems.
pub struct Locked<T> {
lock_state: Lock,
lock_state: RawMutex,
data: UnsafeCell<T>,
}
@@ -243,7 +413,7 @@ impl<T> Locked<T> {
/// Create a new instance
pub const fn new(data: T) -> Self {
Self {
lock_state: Lock::new(),
lock_state: RawMutex::new(),
data: UnsafeCell::new(data),
}
}
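A usage sketch for `Locked` (this assumes the module's `with` accessor, which hands the closure a mutable reference under the lock):

```rust
use esp_hal::sync::Locked;

static STATE: Locked<u32> = Locked::new(0);

fn increment() {
    // Locks, passes `&mut u32` to the closure, then unlocks.
    STATE.with(|value| *value += 1);
}
```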
@@ -262,7 +432,7 @@ struct CriticalSection;
critical_section::set_impl!(CriticalSection);
static CRITICAL_SECTION: Lock = Lock::new();
static CRITICAL_SECTION: RawMutex = RawMutex::new();
unsafe impl critical_section::Impl for CriticalSection {
unsafe fn acquire() -> critical_section::RawRestoreState {
@@ -274,46 +444,19 @@ unsafe impl critical_section::Impl for CriticalSection {
}
}
/// A mutual exclusion primitive.
///
/// This is an implementation of `embassy_sync::blocking_mutex::raw::RawMutex`.
/// It needs a bit of memory, but it does not take a global critical section,
/// making it preferable for use in multi-core systems.
///
/// On single core systems, this is equivalent to `CriticalSectionRawMutex`.
pub struct RawMutex(Lock);
impl RawMutex {
/// Create a new mutex.
#[allow(clippy::new_without_default)]
pub const fn new() -> Self {
Self(Lock::new())
}
}
unsafe impl embassy_sync::blocking_mutex::raw::RawMutex for RawMutex {
#[allow(clippy::declare_interior_mutable_const)]
const INIT: Self = Self(Lock::new());
fn lock<R>(&self, f: impl FnOnce() -> R) -> R {
let _token = LockGuard::new_reentrant(&self.0);
f()
}
}
struct LockGuard<'a> {
lock: &'a Lock,
struct LockGuard<'a, L: single_core::RawLock> {
lock: &'a GenericRawMutex<L>,
token: critical_section::RawRestoreState,
}
impl<'a> LockGuard<'a> {
fn new(lock: &'a Lock) -> Self {
impl<'a, L: single_core::RawLock> LockGuard<'a, L> {
fn new(lock: &'a GenericRawMutex<L>) -> Self {
let this = Self::new_reentrant(lock);
assert!(this.token & REENTRY_FLAG == 0, "lock is not reentrant");
this
}
fn new_reentrant(lock: &'a Lock) -> Self {
fn new_reentrant(lock: &'a GenericRawMutex<L>) -> Self {
let token = unsafe {
// SAFETY: the same lock will be released when dropping the guard.
// This ensures that the lock is released on the same thread, in the reverse
@@ -325,7 +468,7 @@ impl<'a> LockGuard<'a> {
}
}
impl Drop for LockGuard<'_> {
impl<L: single_core::RawLock> Drop for LockGuard<'_, L> {
fn drop(&mut self) {
unsafe { self.lock.release(self.token) };
}


@@ -26,7 +26,7 @@ use crate::{
interrupt::{self, InterruptConfigurable, InterruptHandler},
peripheral::Peripheral,
peripherals::{Interrupt, SYSTIMER},
sync::{lock, Lock},
sync::{lock, RawMutex},
system::{Peripheral as PeripheralEnable, PeripheralClockControl},
Cpu,
};
@@ -628,8 +628,8 @@ impl Peripheral for Alarm {
impl crate::private::Sealed for Alarm {}
static CONF_LOCK: Lock = Lock::new();
static INT_ENA_LOCK: Lock = Lock::new();
static CONF_LOCK: RawMutex = RawMutex::new();
static INT_ENA_LOCK: RawMutex = RawMutex::new();
// Async functionality of the system timer.
mod asynch {


@@ -76,13 +76,13 @@ use crate::{
peripheral::Peripheral,
peripherals::{timg0::RegisterBlock, Interrupt, TIMG0},
private::Sealed,
sync::{lock, Lock},
sync::{lock, RawMutex},
system::PeripheralClockControl,
};
const NUM_TIMG: usize = 1 + cfg!(timg1) as usize;
static INT_ENA_LOCK: [Lock; NUM_TIMG] = [const { Lock::new() }; NUM_TIMG];
static INT_ENA_LOCK: [RawMutex; NUM_TIMG] = [const { RawMutex::new() }; NUM_TIMG];
/// A timer group consisting of
#[cfg_attr(not(timg_timer1), doc = "a general purpose timer")]


@@ -10,7 +10,7 @@ pub(crate) mod os_adapter_chip_specific;
use core::{cell::RefCell, ptr::addr_of_mut};
use enumset::EnumSet;
use esp_hal::sync::{Lock, Locked};
use esp_hal::sync::{Locked, RawMutex};
use super::WifiEvent;
use crate::{
@@ -35,7 +35,7 @@ use crate::{
timer::yield_task,
};
static WIFI_LOCK: Lock = Lock::new();
static WIFI_LOCK: RawMutex = RawMutex::new();
static mut QUEUE_HANDLE: *mut ConcurrentQueue = core::ptr::null_mut();


@@ -7,16 +7,45 @@
#![no_std]
#![no_main]
use esp_hal::{
delay::Delay,
interrupt::{
software::{SoftwareInterrupt, SoftwareInterruptControl},
InterruptHandler,
Priority,
},
peripherals::Peripherals,
sync::{Locked, RawPriorityLimitedMutex},
};
use hil_test as _;
fn test_access_at_priority(peripherals: Peripherals, priority: Priority) {
static LOCK: RawPriorityLimitedMutex = RawPriorityLimitedMutex::new(Priority::Priority1);
extern "C" fn access<const INT: u8>() {
unsafe { SoftwareInterrupt::<INT>::steal().reset() };
LOCK.lock(|| {});
embedded_test::export::check_outcome(());
}
let sw_ints = SoftwareInterruptControl::new(peripherals.SW_INTERRUPT);
let mut prio_2_interrupt = sw_ints.software_interrupt1;
prio_2_interrupt.set_interrupt_handler(InterruptHandler::new(access::<1>, priority));
prio_2_interrupt.raise();
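// Spin until the raised interrupt fires; `check_outcome` in the handler
// finishes the test (otherwise the 3-second default timeout fails it).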
loop {}
}
#[cfg(test)]
#[embedded_test::tests(default_timeout = 3)]
mod tests {
use esp_hal::sync::Locked;
use super::*;
#[init]
fn init() {
esp_hal::init(esp_hal::Config::default());
fn init() -> Peripherals {
esp_hal::init(esp_hal::Config::default())
}
#[test]
@@ -55,4 +84,66 @@ mod tests {
});
});
}
#[test]
fn priority_lock_tests(peripherals: Peripherals) {
use portable_atomic::{AtomicU32, Ordering};
static COUNTER: AtomicU32 = AtomicU32::new(0);
extern "C" fn increment<const INT: u8>() {
unsafe { SoftwareInterrupt::<INT>::steal().reset() };
COUNTER.fetch_add(1, Ordering::AcqRel);
}
let sw_ints = SoftwareInterruptControl::new(peripherals.SW_INTERRUPT);
let mut prio_1_interrupt = sw_ints.software_interrupt0;
let mut prio_2_interrupt = sw_ints.software_interrupt1;
prio_1_interrupt
.set_interrupt_handler(InterruptHandler::new(increment::<0>, Priority::Priority1));
prio_2_interrupt
.set_interrupt_handler(InterruptHandler::new(increment::<1>, Priority::Priority2));
let lock = RawPriorityLimitedMutex::new(Priority::Priority1);
let delay = Delay::new();
// Lock does nothing unless taken
prio_1_interrupt.raise();
// Software interrupts may not trigger immediately and there may be some
// instructions executed after `raise`. We need to wait a short while
// to ensure that the interrupt has been serviced before reading the counter.
delay.delay_millis(1);
assert_eq!(COUNTER.load(Ordering::Acquire), 1);
// Taking the lock masks the lower priority interrupt
lock.lock(|| {
prio_1_interrupt.raise();
delay.delay_millis(1);
assert_eq!(COUNTER.load(Ordering::Acquire), 1); // not incremented
// Taken lock does not mask higher priority interrupts
prio_2_interrupt.raise();
delay.delay_millis(1);
assert_eq!(COUNTER.load(Ordering::Acquire), 2);
});
// Releasing the lock unmasks the lower priority interrupt
delay.delay_millis(1);
assert_eq!(COUNTER.load(Ordering::Acquire), 3);
}
#[test]
fn priority_lock_allows_access_from_equal_priority(peripherals: Peripherals) {
test_access_at_priority(peripherals, Priority::Priority1);
}
#[test]
#[should_panic]
fn priority_lock_panics_on_higher_priority_access(peripherals: Peripherals) {
test_access_at_priority(peripherals, Priority::Priority2);
}
}