Merge branch 'main' into adc_with_clock

This commit is contained in:
Dario Nieuwenhuis 2025-09-05 15:29:20 +02:00 committed by GitHub
commit 704c294162
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
39 changed files with 1965 additions and 502 deletions

View File

@ -161,9 +161,9 @@ jobs:
if: steps.changes.outputs.embassy-executor-macros == 'true'
uses: dangoslen/changelog-enforcer@v3
with:
changeLogPath: embassy-executor-macros/CHANGELOG.md
changeLogPath: embassy-executor/CHANGELOG.md
skipLabels: "skip-changelog"
missingUpdateErrorMessage: "Please add a changelog entry in the embassy-executor-macros/CHANGELOG.md file."
missingUpdateErrorMessage: "Please add a changelog entry in the embassy-executor/CHANGELOG.md file."
- name: Check that changelog updated (embassy-executor-timer-queue)
if: steps.changes.outputs.embassy-executor-timer-queue == 'true'
uses: dangoslen/changelog-enforcer@v3

ci.sh (25 changed lines)
View File

@ -193,18 +193,19 @@ cargo batch \
--- build --release --manifest-path embassy-nxp/Cargo.toml --target thumbv8m.main-none-eabihf --features lpc55,defmt \
--- build --release --manifest-path embassy-nxp/Cargo.toml --target thumbv7em-none-eabihf --features mimxrt1011,rt,defmt,time-driver-pit \
--- build --release --manifest-path embassy-nxp/Cargo.toml --target thumbv7em-none-eabihf --features mimxrt1062,rt,defmt,time-driver-pit \
--- build --release --manifest-path embassy-mspm0/Cargo.toml --target thumbv6m-none-eabi --features mspm0c1104dgs20,defmt,time-driver-any \
--- build --release --manifest-path embassy-mspm0/Cargo.toml --target thumbv6m-none-eabi --features mspm0g3507pm,defmt,time-driver-any \
--- build --release --manifest-path embassy-mspm0/Cargo.toml --target thumbv6m-none-eabi --features mspm0g3519pz,defmt,time-driver-any \
--- build --release --manifest-path embassy-mspm0/Cargo.toml --target thumbv6m-none-eabi --features mspm0l1306rhb,defmt,time-driver-any \
--- build --release --manifest-path embassy-mspm0/Cargo.toml --target thumbv6m-none-eabi --features mspm0l2228pn,defmt,time-driver-any \
--- build --release --manifest-path embassy-mspm0/Cargo.toml --target thumbv6m-none-eabi --features mspm0l1345dgs28,defmt,time-driver-any \
--- build --release --manifest-path embassy-mspm0/Cargo.toml --target thumbv6m-none-eabi --features mspm0l1106dgs28,defmt,time-driver-any \
--- build --release --manifest-path embassy-mspm0/Cargo.toml --target thumbv6m-none-eabi --features mspm0l1228pm,defmt,time-driver-any \
--- build --release --manifest-path embassy-mspm0/Cargo.toml --target thumbv6m-none-eabi --features mspm0g1107ycj,defmt,time-driver-any \
--- build --release --manifest-path embassy-mspm0/Cargo.toml --target thumbv6m-none-eabi --features mspm0g3105rhb,defmt,time-driver-any \
--- build --release --manifest-path embassy-mspm0/Cargo.toml --target thumbv6m-none-eabi --features mspm0g1505pt,defmt,time-driver-any \
--- build --release --manifest-path embassy-mspm0/Cargo.toml --target thumbv6m-none-eabi --features mspm0g1519rhb,defmt,time-driver-any \
--- build --release --manifest-path embassy-mspm0/Cargo.toml --target thumbv6m-none-eabi --features mspm0c1104dgs20,rt,defmt,time-driver-any \
--- build --release --manifest-path embassy-mspm0/Cargo.toml --target thumbv6m-none-eabi --features mspm0c1106rgz,rt,defmt,time-driver-any \
--- build --release --manifest-path embassy-mspm0/Cargo.toml --target thumbv6m-none-eabi --features mspm0g3507pm,rt,defmt,time-driver-any \
--- build --release --manifest-path embassy-mspm0/Cargo.toml --target thumbv6m-none-eabi --features mspm0g3519pz,rt,defmt,time-driver-any \
--- build --release --manifest-path embassy-mspm0/Cargo.toml --target thumbv6m-none-eabi --features mspm0l1306rhb,rt,defmt,time-driver-any \
--- build --release --manifest-path embassy-mspm0/Cargo.toml --target thumbv6m-none-eabi --features mspm0l2228pn,rt,defmt,time-driver-any \
--- build --release --manifest-path embassy-mspm0/Cargo.toml --target thumbv6m-none-eabi --features mspm0l1345dgs28,rt,defmt,time-driver-any \
--- build --release --manifest-path embassy-mspm0/Cargo.toml --target thumbv6m-none-eabi --features mspm0l1106dgs28,rt,defmt,time-driver-any \
--- build --release --manifest-path embassy-mspm0/Cargo.toml --target thumbv6m-none-eabi --features mspm0l1228pm,rt,defmt,time-driver-any \
--- build --release --manifest-path embassy-mspm0/Cargo.toml --target thumbv6m-none-eabi --features mspm0g1107ycj,rt,defmt,time-driver-any \
--- build --release --manifest-path embassy-mspm0/Cargo.toml --target thumbv6m-none-eabi --features mspm0g3105rhb,rt,defmt,time-driver-any \
--- build --release --manifest-path embassy-mspm0/Cargo.toml --target thumbv6m-none-eabi --features mspm0g1505pt,rt,defmt,time-driver-any \
--- build --release --manifest-path embassy-mspm0/Cargo.toml --target thumbv6m-none-eabi --features mspm0g1519rhb,rt,defmt,time-driver-any \
--- build --release --manifest-path cyw43/Cargo.toml --target thumbv6m-none-eabi --features ''\
--- build --release --manifest-path cyw43/Cargo.toml --target thumbv6m-none-eabi --features 'log' \
--- build --release --manifest-path cyw43/Cargo.toml --target thumbv6m-none-eabi --features 'defmt' \

View File

@ -23,3 +23,4 @@ proc-macro = true
[features]
nightly = []
metadata-name = []

View File

@ -170,6 +170,14 @@ For example: `#[embassy_executor::main(entry = ..., executor = \"some_crate::Exe
let f_body = f.body;
let out = &f.sig.output;
let name_main_task = if cfg!(feature = "metadata-name") {
quote!(
main_task.metadata().set_name("main\0");
)
} else {
quote!()
};
let (main_ret, mut main_body) = match arch.flavor {
Flavor::Standard => (
quote!(!),
@ -181,7 +189,9 @@ For example: `#[embassy_executor::main(entry = ..., executor = \"some_crate::Exe
let mut executor = #executor::new();
let executor = unsafe { __make_static(&mut executor) };
executor.run(|spawner| {
spawner.spawn(__embassy_main(spawner).unwrap());
let main_task = __embassy_main(spawner).unwrap();
#name_main_task
spawner.spawn(main_task);
})
},
),
@ -191,7 +201,9 @@ For example: `#[embassy_executor::main(entry = ..., executor = \"some_crate::Exe
let executor = ::std::boxed::Box::leak(::std::boxed::Box::new(#executor::new()));
executor.start(|spawner| {
spawner.spawn(__embassy_main(spawner).unwrap());
let main_task = __embassy_main(spawner).unwrap();
#name_main_task
spawner.spawn(main_task);
});
Ok(())
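With the `metadata-name` feature enabled, the `#name_main_task` placeholder in the hunks above expands to the `set_name` call, so the spawn site emitted by the macro becomes roughly the following (a sketch assembled from the quoted fragments, shown for the `Flavor::Standard` arm; `Executor` stands in for whatever `#executor` type was selected, and `__embassy_main`/`__make_static` are macro-generated items):

let mut executor = Executor::new();
let executor = unsafe { __make_static(&mut executor) };
executor.run(|spawner| {
    // Build the main task...
    let main_task = __embassy_main(spawner).unwrap();
    // ...name it "main" (NUL-terminated, matching the string the macro passes)...
    main_task.metadata().set_name("main\0");
    // ...then spawn it as before.
    spawner.spawn(main_task);
})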

View File

@ -8,14 +8,20 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
<!-- next-header -->
## Unreleased - ReleaseDate
- Added new metadata API for tasks
- Added new metadata API for tasks.
- Main task automatically gets a name of `main` when the `metadata-name` feature is enabled.
- Upgraded rtos-trace
## 0.9.1 - 2025-08-31
- Fixed performance regression on some ESP32 MCUs.
## 0.9.0 - 2025-08-26
- Added `extern "Rust" fn __embassy_time_queue_item_from_waker`
- Removed `TaskRef::dangling`
- Added `embassy_time_queue_utils` as a dependency
- Moved the `TimeQueueItem` struct and `timer-item-payload-size-*` features into embassy-time-queue-utils
- Added `embassy-executor-timer-queue` as a dependency
- Moved the `TimeQueueItem` struct and `timer-item-payload-size-*` features (as `timer-item-size-X-words`) into `embassy-executor-timer-queue`
## 0.8.0 - 2025-07-31

View File

@ -51,7 +51,7 @@ features = ["defmt", "arch-cortex-m", "executor-thread", "executor-interrupt"]
[dependencies]
defmt = { version = "1.0.1", optional = true }
log = { version = "0.4.14", optional = true }
rtos-trace = { version = "0.1.3", optional = true }
rtos-trace = { version = "0.2", optional = true }
embassy-executor-macros = { version = "0.7.0", path = "../embassy-executor-macros" }
embassy-time-driver = { version = "0.2.1", path = "../embassy-time-driver", optional = true }
@ -112,7 +112,7 @@ arch-spin = ["_arch"]
#! ### Metadata
## Enable the `name` field in task metadata.
metadata-name = []
metadata-name = ["embassy-executor-macros/metadata-name"]
#! ### Executor
@ -125,19 +125,3 @@ trace = ["_any_trace"]
## Enable support for rtos-trace framework
rtos-trace = ["_any_trace", "metadata-name", "dep:rtos-trace", "dep:embassy-time-driver"]
_any_trace = []
#! ### Timer Item Payload Size
#! Sets the size of the payload for timer items, allowing integrated timer implementors to store
#! additional data in the timer item. The payload field will be aligned to this value as well.
#! If these features are not defined, the timer item will contain no payload field.
_timer-item-payload = [] # A size was picked
## 1 bytes
timer-item-payload-size-1 = ["_timer-item-payload"]
## 2 bytes
timer-item-payload-size-2 = ["_timer-item-payload"]
## 4 bytes
timer-item-payload-size-4 = ["_timer-item-payload"]
## 8 bytes
timer-item-payload-size-8 = ["_timer-item-payload"]

View File

@ -12,8 +12,14 @@
mod run_queue;
#[cfg_attr(all(cortex_m, target_has_atomic = "32"), path = "state_atomics_arm.rs")]
#[cfg_attr(all(not(cortex_m), target_has_atomic = "8"), path = "state_atomics.rs")]
#[cfg_attr(not(target_has_atomic = "8"), path = "state_critical_section.rs")]
#[cfg_attr(
all(not(cortex_m), any(target_has_atomic = "8", target_has_atomic = "32")),
path = "state_atomics.rs"
)]
#[cfg_attr(
not(any(target_has_atomic = "8", target_has_atomic = "32")),
path = "state_critical_section.rs"
)]
mod state;
#[cfg(feature = "_any_trace")]

View File

@ -1,4 +1,15 @@
use core::sync::atomic::{AtomicU8, Ordering};
// Prefer pointer-width atomic operations, as narrower ones may be slower.
#[cfg(all(target_pointer_width = "32", target_has_atomic = "32"))]
type AtomicState = core::sync::atomic::AtomicU32;
#[cfg(not(all(target_pointer_width = "32", target_has_atomic = "32")))]
type AtomicState = core::sync::atomic::AtomicU8;
#[cfg(all(target_pointer_width = "32", target_has_atomic = "32"))]
type StateBits = u32;
#[cfg(not(all(target_pointer_width = "32", target_has_atomic = "32")))]
type StateBits = u8;
use core::sync::atomic::Ordering;
#[derive(Clone, Copy)]
pub(crate) struct Token(());
@ -11,18 +22,18 @@ pub(crate) fn locked<R>(f: impl FnOnce(Token) -> R) -> R {
}
/// Task is spawned (has a future)
pub(crate) const STATE_SPAWNED: u8 = 1 << 0;
pub(crate) const STATE_SPAWNED: StateBits = 1 << 0;
/// Task is in the executor run queue
pub(crate) const STATE_RUN_QUEUED: u8 = 1 << 1;
pub(crate) const STATE_RUN_QUEUED: StateBits = 1 << 1;
pub(crate) struct State {
state: AtomicU8,
state: AtomicState,
}
impl State {
pub const fn new() -> State {
Self {
state: AtomicU8::new(0),
state: AtomicState::new(0),
}
}
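For illustration, a small hypothetical helper written against the aliases above (not one of the executor's actual `State` methods) shows why the rest of the file stays width-agnostic:

fn is_spawned(state: &AtomicState) -> bool {
    // STATE_SPAWNED is typed as StateBits, so the same expression compiles whether the
    // backing atomic is 32 bits or 8 bits wide.
    state.load(Ordering::Relaxed) & STATE_SPAWNED != 0
}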

View File

@ -3,13 +3,18 @@ use core::cell::Cell;
pub(crate) use critical_section::{with as locked, CriticalSection as Token};
use critical_section::{CriticalSection, Mutex};
#[cfg(target_arch = "avr")]
type StateBits = u8;
#[cfg(not(target_arch = "avr"))]
type StateBits = usize;
/// Task is spawned (has a future)
pub(crate) const STATE_SPAWNED: u8 = 1 << 0;
pub(crate) const STATE_SPAWNED: StateBits = 1 << 0;
/// Task is in the executor run queue
pub(crate) const STATE_RUN_QUEUED: u8 = 1 << 1;
pub(crate) const STATE_RUN_QUEUED: StateBits = 1 << 1;
pub(crate) struct State {
state: Mutex<Cell<u8>>,
state: Mutex<Cell<StateBits>>,
}
impl State {
@ -19,11 +24,11 @@ impl State {
}
}
fn update<R>(&self, f: impl FnOnce(&mut u8) -> R) -> R {
fn update<R>(&self, f: impl FnOnce(&mut StateBits) -> R) -> R {
critical_section::with(|cs| self.update_with_cs(cs, f))
}
fn update_with_cs<R>(&self, cs: CriticalSection<'_>, f: impl FnOnce(&mut u8) -> R) -> R {
fn update_with_cs<R>(&self, cs: CriticalSection<'_>, f: impl FnOnce(&mut StateBits) -> R) -> R {
let s = self.state.borrow(cs);
let mut val = s.get();
let r = f(&mut val);

View File

@ -7,7 +7,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
<!-- next-header -->
## Unreleased - ReleaseDate
- feat: Add I2C Controller (blocking & async) + examples for mspm0l1306, mspm0g3507 (tested MCUs) (#4435)
- fix gpio interrupt not being set for mspm0l110x
- feat: Add window watchdog implementation based on WWDT0, WWDT1 peripherals (#4574)
- feat: Add MSPM0C1105/C1106 support

View File

@ -69,7 +69,7 @@ cortex-m = "0.7.6"
critical-section = "1.2.0"
# mspm0-metapac = { version = "" }
mspm0-metapac = { git = "https://github.com/mspm0-rs/mspm0-data-generated/", tag = "mspm0-data-fe17d879548757ca29821da66a1bebf2debd4846" }
mspm0-metapac = { git = "https://github.com/mspm0-rs/mspm0-data-generated/", tag = "mspm0-data-d7bf3d01ac0780e716a45b0474234d39443dc5cf" }
[build-dependencies]
proc-macro2 = "1.0.94"
@ -77,7 +77,7 @@ quote = "1.0.40"
cfg_aliases = "0.2.1"
# mspm0-metapac = { version = "", default-features = false, features = ["metadata"] }
mspm0-metapac = { git = "https://github.com/mspm0-rs/mspm0-data-generated/", tag = "mspm0-data-fe17d879548757ca29821da66a1bebf2debd4846", default-features = false, features = ["metadata"] }
mspm0-metapac = { git = "https://github.com/mspm0-rs/mspm0-data-generated/", tag = "mspm0-data-d7bf3d01ac0780e716a45b0474234d39443dc5cf", default-features = false, features = ["metadata"] }
[features]
default = ["rt"]
@ -159,6 +159,24 @@ mspm0c1104dsg = ["mspm0-metapac/mspm0c1104dsg"]
mspm0c1104dyy = ["mspm0-metapac/mspm0c1104dyy"]
mspm0c1104ruk = ["mspm0-metapac/mspm0c1104ruk"]
mspm0c1104ycj = ["mspm0-metapac/mspm0c1104ycj"]
mspm0c1105pt = ["mspm0-metapac/mspm0c1105pt"]
mspm0c1105rgz = ["mspm0-metapac/mspm0c1105rgz"]
mspm0c1105rhb = ["mspm0-metapac/mspm0c1105rhb"]
mspm0c1105dgs32 = ["mspm0-metapac/mspm0c1105dgs32"]
mspm0c1105dgs28 = ["mspm0-metapac/mspm0c1105dgs28"]
mspm0c1105rge = ["mspm0-metapac/mspm0c1105rge"]
mspm0c1105dgs20 = ["mspm0-metapac/mspm0c1105dgs20"]
mspm0c1105ruk = ["mspm0-metapac/mspm0c1105ruk"]
mspm0c1105zcm = ["mspm0-metapac/mspm0c1105zcm"]
mspm0c1106pt = ["mspm0-metapac/mspm0c1106pt"]
mspm0c1106rgz = ["mspm0-metapac/mspm0c1106rgz"]
mspm0c1106rhb = ["mspm0-metapac/mspm0c1106rhb"]
mspm0c1106dgs32 = ["mspm0-metapac/mspm0c1106dgs32"]
mspm0c1106dgs28 = ["mspm0-metapac/mspm0c1106dgs28"]
mspm0c1106rge = ["mspm0-metapac/mspm0c1106rge"]
mspm0c1106dgs20 = ["mspm0-metapac/mspm0c1106dgs20"]
mspm0c1106ruk = ["mspm0-metapac/mspm0c1106ruk"]
mspm0c1106zcm = ["mspm0-metapac/mspm0c1106zcm"]
mspm0g1105dgs28 = ["mspm0-metapac/mspm0g1105dgs28"]
mspm0g1105pm = ["mspm0-metapac/mspm0g1105pm"]
mspm0g1105pt = ["mspm0-metapac/mspm0g1105pt"]

View File

@ -79,10 +79,14 @@ fn get_chip_cfgs(chip_name: &str) -> Vec<String> {
let mut cfgs = Vec::new();
// GPIO on C110x is special as it does not belong to an interrupt group.
if chip_name.starts_with("mspm0c110") || chip_name.starts_with("msps003f") {
if chip_name.starts_with("mspm0c1103") || chip_name.starts_with("mspm0c1104") || chip_name.starts_with("msps003f") {
cfgs.push("mspm0c110x".to_string());
}
if chip_name.starts_with("mspm0c1105") || chip_name.starts_with("mspm0c1106") {
cfgs.push("mspm0c1105_c1106".to_string());
}
// Family ranges (temporary until int groups are generated)
//
// TODO: Remove this once int group stuff is generated.
@ -537,6 +541,8 @@ fn generate_interrupts() -> TokenStream {
pub fn enable_group_interrupts(_cs: critical_section::CriticalSection) {
use crate::interrupt::typelevel::Interrupt;
// This is empty for C1105/6
#[allow(unused_unsafe)]
unsafe {
#(#group_interrupt_enables)*
}

View File

@ -10,7 +10,7 @@ use embassy_sync::waitqueue::AtomicWaker;
use crate::pac::gpio::vals::*;
use crate::pac::gpio::{self};
#[cfg(all(feature = "rt", any(mspm0c110x, mspm0l110x)))]
#[cfg(all(feature = "rt", any(mspm0c110x, mspm0c1105_c1106, mspm0l110x)))]
use crate::pac::interrupt;
use crate::pac::{self};
@ -1108,24 +1108,30 @@ fn irq_handler(gpio: gpio::Gpio, wakers: &[AtomicWaker; 32]) {
// C110x and L110x have a dedicated interrupt just for GPIOA.
//
// These chips do not have a GROUP1 interrupt.
#[cfg(all(feature = "rt", any(mspm0c110x, mspm0l110x)))]
#[cfg(all(feature = "rt", any(mspm0c110x, mspm0c1105_c1106, mspm0l110x)))]
#[interrupt]
fn GPIOA() {
irq_handler(pac::GPIOA, &PORTA_WAKERS);
}
#[cfg(all(feature = "rt", mspm0c1105_c1106))]
#[interrupt]
fn GPIOB() {
irq_handler(pac::GPIOB, &PORTB_WAKERS);
}
// These symbols are weakly defined as DefaultHandler and are called by the interrupt group implementation.
//
// Defining these as no_mangle is required so that the linker will pick these over the default handler.
#[cfg(all(feature = "rt", not(any(mspm0c110x, mspm0l110x))))]
#[cfg(all(feature = "rt", not(any(mspm0c110x, mspm0c1105_c1106, mspm0l110x))))]
#[no_mangle]
#[allow(non_snake_case)]
fn GPIOA() {
irq_handler(pac::GPIOA, &PORTA_WAKERS);
}
#[cfg(all(feature = "rt", gpio_pb))]
#[cfg(all(feature = "rt", gpio_pb, not(mspm0c1105_c1106)))]
#[no_mangle]
#[allow(non_snake_case)]
fn GPIOB() {

View File

@ -195,7 +195,7 @@ impl Config {
.unwrap();
}
#[cfg(any(mspm0c110x))]
#[cfg(any(mspm0c110x, mspm0c1105_c1106))]
fn calculate_clock_source(&self) -> u32 {
// Assume that BusClk has default value.
// TODO: calculate BusClk more precisely.

View File

@ -8,6 +8,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
<!-- next-header -->
## Unreleased - ReleaseDate
- changed: nrf54l: Disable glitch detection and enable DC/DC in init.
## 0.7.0 - 2025-08-26
- bugfix: use correct analog input SAADC pins on nrf5340

View File

@ -707,6 +707,14 @@ pub fn init(config: config::Config) -> Peripherals {
}
}
// GLITCHDET is only accessible for secure code
#[cfg(all(feature = "_nrf54l", feature = "_s"))]
{
// The voltage glitch detectors are automatically enabled after reset.
// To save power, the glitch detectors must be disabled when not in use.
pac::GLITCHDET.config().write(|w| w.set_enable(false));
}
// Setup debug protection.
#[cfg(not(feature = "_nrf51"))]
match config.debug {
@ -1083,6 +1091,15 @@ pub fn init(config: config::Config) -> Peripherals {
reg.vregradio().dcdcen().write(|w| w.set_dcdcen(true));
}
}
#[cfg(feature = "_nrf54l")]
{
// Turn on DCDC
// From Product specification:
// "Once the device starts, the DC/DC regulator must be enabled using register VREGMAIN.DCDCEN.
// When enabling the DC/DC regulator, the device checks if an inductor is connected to the DCC pin.
// If an inductor is not detected, the device remains in LDO mode"
pac::REGULATORS.vregmain().dcdcen().write(|w| w.set_val(true));
}
// Init GPIOTE
#[cfg(not(feature = "_nrf54l"))] // TODO

View File

@ -8,6 +8,11 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
<!-- next-header -->
## Unreleased - ReleaseDate
- fix: Fixed STM32H5 builds requiring time feature
- feat: Derive Clone, Copy for QSPI Config
- fix: stm32/i2c in master mode (blocking): subsequent transmissions failed after a NACK was received
- feat: stm32/timer: add set_polarity functions for main and complementary outputs in complementary_pwm
## 0.4.0 - 2025-08-26
- feat: stm32/sai: make NODIV independent of MCKDIV
@ -21,6 +26,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
- fix: Fix XSPI not disabling alternate bytes when they were previously enabled
- fix: Fix stm32h7rs init when using external flash via XSPI
- feat: Add Adc::new_with_clock() to configure analog clock
- feat: Add GPDMA linked-list + ringbuffer support ([#3923](https://github.com/embassy-rs/embassy/pull/3923))
## 0.3.0 - 2025-08-12
@ -131,7 +137,7 @@ GPIO:
- Refactor AfType ([#3031](https://github.com/embassy-rs/embassy/pull/3031))
- Gpiov1: Do not call set_speed for AFType::Input ([#2996](https://github.com/embassy-rs/embassy/pull/2996))
UART:
UART:
- Add embedded-io impls ([#2739](https://github.com/embassy-rs/embassy/pull/2739))
- Add support for changing baud rate ([#3512](https://github.com/embassy-rs/embassy/pull/3512))
- Add split_ref ([#3500](https://github.com/embassy-rs/embassy/pull/3500))
@ -155,7 +161,7 @@ UART:
- Wake receive task for each received byte ([#2722](https://github.com/embassy-rs/embassy/pull/2722))
- Fix dma and idle line detection in ringbuffereduartrx ([#3319](https://github.com/embassy-rs/embassy/pull/3319))
SPI:
SPI:
- Add MISO pullup configuration option ([#2943](https://github.com/embassy-rs/embassy/pull/2943))
- Add slew rate configuration options ([#3669](https://github.com/embassy-rs/embassy/pull/3669))
- Fix blocking_write on nosck spi. ([#3035](https://github.com/embassy-rs/embassy/pull/3035))

View File

@ -1814,7 +1814,6 @@ impl<'d, T: Instance> Cryp<'d, T, Async> {
// Configure DMA to transfer input to crypto core.
let dst_ptr: *mut u32 = T::regs().din().as_ptr();
let options = TransferOptions {
#[cfg(not(gpdma))]
priority: crate::dma::Priority::High,
..Default::default()
};
@ -1834,7 +1833,6 @@ impl<'d, T: Instance> Cryp<'d, T, Async> {
// Configure DMA to transfer input to crypto core.
let dst_ptr: *mut u32 = T::regs().din().as_ptr();
let options = TransferOptions {
#[cfg(not(gpdma))]
priority: crate::dma::Priority::High,
..Default::default()
};
@ -1853,7 +1851,6 @@ impl<'d, T: Instance> Cryp<'d, T, Async> {
// Configure DMA to get output from crypto core.
let src_ptr = T::regs().dout().as_ptr();
let options = TransferOptions {
#[cfg(not(gpdma))]
priority: crate::dma::Priority::VeryHigh,
..Default::default()
};
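These `#[cfg(not(gpdma))]` gates are removed because the rewritten GPDMA driver later in this diff gains a `priority` field in its `TransferOptions` as well. That struct is `#[non_exhaustive]`, so code outside the HAL would set the priority through the default rather than a struct literal; a hedged sketch using the field and enum names defined later in this diff:

let mut options = TransferOptions::default();
options.priority = Priority::High; // honored by DMA, BDMA and now GPDMA alike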

View File

@ -498,7 +498,31 @@ impl AnyChannel {
}
}
fn request_stop(&self) {
fn request_pause(&self) {
let info = self.info();
match self.info().dma {
#[cfg(dma)]
DmaInfo::Dma(r) => {
// Disable the channel without overwriting the existing configuration
r.st(info.num).cr().modify(|w| {
w.set_en(false);
});
}
#[cfg(bdma)]
DmaInfo::Bdma(r) => {
// Disable the channel without overwriting the existing configuration
r.ch(info.num).cr().modify(|w| {
w.set_en(false);
});
}
}
}
fn request_resume(&self) {
self.start()
}
fn request_reset(&self) {
let info = self.info();
match self.info().dma {
#[cfg(dma)]
@ -518,26 +542,8 @@ impl AnyChannel {
});
}
}
}
fn request_pause(&self) {
let info = self.info();
match self.info().dma {
#[cfg(dma)]
DmaInfo::Dma(r) => {
// Disable the channel without overwriting the existing configuration
r.st(info.num).cr().modify(|w| {
w.set_en(false);
});
}
#[cfg(bdma)]
DmaInfo::Bdma(r) => {
// Disable the channel without overwriting the existing configuration
r.ch(info.num).cr().modify(|w| {
w.set_en(false);
});
}
}
while self.is_running() {}
}
fn is_running(&self) -> bool {
@ -710,27 +716,31 @@ impl<'a> Transfer<'a> {
Self { channel }
}
/// Request the transfer to stop.
/// The configuration for this channel will **not be preserved**. If you need to restart the transfer
/// at a later point with the same configuration, see [`request_pause`](Self::request_pause) instead.
///
/// This doesn't immediately stop the transfer, you have to wait until [`is_running`](Self::is_running) returns false.
pub fn request_stop(&mut self) {
self.channel.request_stop()
}
/// Request the transfer to pause, keeping the existing configuration for this channel.
/// To restart the transfer, call [`start`](Self::start) again.
///
/// To resume the transfer, call [`request_resume`](Self::request_resume) again.
/// This doesn't immediately stop the transfer, you have to wait until [`is_running`](Self::is_running) returns false.
pub fn request_pause(&mut self) {
self.channel.request_pause()
}
/// Request the transfer to resume after having been paused.
pub fn request_resume(&mut self) {
self.channel.request_resume()
}
/// Request the DMA to reset.
///
/// The configuration for this channel will **not be preserved**. If you need to restart the transfer
/// at a later point with the same configuration, see [`request_pause`](Self::request_pause) instead.
pub fn request_reset(&mut self) {
self.channel.request_reset()
}
/// Return whether this transfer is still running.
///
/// If this returns `false`, it can be because either the transfer finished, or
/// it was requested to stop early with [`request_stop`](Self::request_stop).
/// it was requested to stop early with [`request_pause`](Self::request_pause).
pub fn is_running(&mut self) -> bool {
self.channel.is_running()
}
@ -754,7 +764,7 @@ impl<'a> Transfer<'a> {
impl<'a> Drop for Transfer<'a> {
fn drop(&mut self) {
self.request_stop();
self.request_reset();
while self.is_running() {}
// "Subsequent reads and writes cannot be moved ahead of preceding reads."
@ -901,15 +911,6 @@ impl<'a, W: Word> ReadableRingBuffer<'a, W> {
DmaCtrlImpl(self.channel.reborrow()).set_waker(waker);
}
/// Request the DMA to stop.
/// The configuration for this channel will **not be preserved**. If you need to restart the transfer
/// at a later point with the same configuration, see [`request_pause`](Self::request_pause) instead.
///
/// This doesn't immediately stop the transfer, you have to wait until [`is_running`](Self::is_running) returns false.
pub fn request_stop(&mut self) {
self.channel.request_stop()
}
/// Request the transfer to pause, keeping the existing configuration for this channel.
/// To restart the transfer, call [`start`](Self::start) again.
///
@ -918,10 +919,23 @@ impl<'a, W: Word> ReadableRingBuffer<'a, W> {
self.channel.request_pause()
}
/// Request the transfer to resume after having been paused.
pub fn request_resume(&mut self) {
self.channel.request_resume()
}
/// Request the DMA to reset.
///
/// The configuration for this channel will **not be preserved**. If you need to restart the transfer
/// at a later point with the same configuration, see [`request_pause`](Self::request_pause) instead.
pub fn request_reset(&mut self) {
self.channel.request_reset()
}
/// Return whether DMA is still running.
///
/// If this returns `false`, it can be because either the transfer finished, or
/// it was requested to stop early with [`request_stop`](Self::request_stop).
/// it was requested to stop early with [`request_reset`](Self::request_reset).
pub fn is_running(&mut self) -> bool {
self.channel.is_running()
}
@ -934,7 +948,7 @@ impl<'a, W: Word> ReadableRingBuffer<'a, W> {
/// This is designed to be used with streaming input data such as the
/// I2S/SAI or ADC.
///
/// When using the UART, you probably want `request_stop()`.
/// When using the UART, you probably want `request_reset()`.
pub async fn stop(&mut self) {
self.channel.disable_circular_mode();
//wait until cr.susp reads as true
@ -948,7 +962,7 @@ impl<'a, W: Word> ReadableRingBuffer<'a, W> {
impl<'a, W: Word> Drop for ReadableRingBuffer<'a, W> {
fn drop(&mut self) {
self.request_stop();
self.request_reset();
while self.is_running() {}
// "Subsequent reads and writes cannot be moved ahead of preceding reads."
@ -1058,8 +1072,8 @@ impl<'a, W: Word> WritableRingBuffer<'a, W> {
/// at a later point with the same configuration, see [`request_pause`](Self::request_pause) instead.
///
/// This doesn't immediately stop the transfer, you have to wait until [`is_running`](Self::is_running) returns false.
pub fn request_stop(&mut self) {
self.channel.request_stop()
pub fn request_reset(&mut self) {
self.channel.request_reset()
}
/// Request the transfer to pause, keeping the existing configuration for this channel.
@ -1073,7 +1087,7 @@ impl<'a, W: Word> WritableRingBuffer<'a, W> {
/// Return whether DMA is still running.
///
/// If this returns `false`, it can be because either the transfer finished, or
/// it was requested to stop early with [`request_stop`](Self::request_stop).
/// it was requested to stop early with [`request_reset`](Self::request_reset).
pub fn is_running(&mut self) -> bool {
self.channel.is_running()
}
@ -1098,7 +1112,7 @@ impl<'a, W: Word> WritableRingBuffer<'a, W> {
impl<'a, W: Word> Drop for WritableRingBuffer<'a, W> {
fn drop(&mut self) {
self.request_stop();
self.request_reset();
while self.is_running() {}
// "Subsequent reads and writes cannot be moved ahead of preceding reads."

View File

@ -1,339 +0,0 @@
#![macro_use]
use core::future::Future;
use core::pin::Pin;
use core::sync::atomic::{fence, Ordering};
use core::task::{Context, Poll};
use embassy_hal_internal::Peri;
use embassy_sync::waitqueue::AtomicWaker;
use super::word::{Word, WordSize};
use super::{AnyChannel, Channel, Dir, Request, STATE};
use crate::interrupt::typelevel::Interrupt;
use crate::interrupt::Priority;
use crate::pac;
use crate::pac::gpdma::vals;
pub(crate) struct ChannelInfo {
pub(crate) dma: pac::gpdma::Gpdma,
pub(crate) num: usize,
#[cfg(feature = "_dual-core")]
pub(crate) irq: pac::Interrupt,
}
/// GPDMA transfer options.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[non_exhaustive]
pub struct TransferOptions {}
impl Default for TransferOptions {
fn default() -> Self {
Self {}
}
}
impl From<WordSize> for vals::Dw {
fn from(raw: WordSize) -> Self {
match raw {
WordSize::OneByte => Self::BYTE,
WordSize::TwoBytes => Self::HALF_WORD,
WordSize::FourBytes => Self::WORD,
}
}
}
pub(crate) struct ChannelState {
waker: AtomicWaker,
}
impl ChannelState {
pub(crate) const NEW: Self = Self {
waker: AtomicWaker::new(),
};
}
/// safety: must be called only once
pub(crate) unsafe fn init(cs: critical_section::CriticalSection, irq_priority: Priority) {
foreach_interrupt! {
($peri:ident, gpdma, $block:ident, $signal_name:ident, $irq:ident) => {
crate::interrupt::typelevel::$irq::set_priority_with_cs(cs, irq_priority);
#[cfg(not(feature = "_dual-core"))]
crate::interrupt::typelevel::$irq::enable();
};
}
crate::_generated::init_gpdma();
}
impl AnyChannel {
/// Safety: Must be called with a matching set of parameters for a valid dma channel
pub(crate) unsafe fn on_irq(&self) {
let info = self.info();
#[cfg(feature = "_dual-core")]
{
use embassy_hal_internal::interrupt::InterruptExt as _;
info.irq.enable();
}
let state = &STATE[self.id as usize];
let ch = info.dma.ch(info.num);
let sr = ch.sr().read();
if sr.dtef() {
panic!(
"DMA: data transfer error on DMA@{:08x} channel {}",
info.dma.as_ptr() as u32,
info.num
);
}
if sr.usef() {
panic!(
"DMA: user settings error on DMA@{:08x} channel {}",
info.dma.as_ptr() as u32,
info.num
);
}
if sr.suspf() || sr.tcf() {
// disable all xxIEs to prevent the irq from firing again.
ch.cr().write(|_| {});
// Wake the future. It'll look at tcf and see it's set.
state.waker.wake();
}
}
}
/// DMA transfer.
#[must_use = "futures do nothing unless you `.await` or poll them"]
pub struct Transfer<'a> {
channel: Peri<'a, AnyChannel>,
}
impl<'a> Transfer<'a> {
/// Create a new read DMA transfer (peripheral to memory).
pub unsafe fn new_read<W: Word>(
channel: Peri<'a, impl Channel>,
request: Request,
peri_addr: *mut W,
buf: &'a mut [W],
options: TransferOptions,
) -> Self {
Self::new_read_raw(channel, request, peri_addr, buf, options)
}
/// Create a new read DMA transfer (peripheral to memory), using raw pointers.
pub unsafe fn new_read_raw<MW: Word, PW: Word>(
channel: Peri<'a, impl Channel>,
request: Request,
peri_addr: *mut PW,
buf: *mut [MW],
options: TransferOptions,
) -> Self {
Self::new_inner(
channel.into(),
request,
Dir::PeripheralToMemory,
peri_addr as *const u32,
buf as *mut MW as *mut u32,
buf.len(),
true,
PW::size(),
MW::size(),
options,
)
}
/// Create a new write DMA transfer (memory to peripheral).
pub unsafe fn new_write<MW: Word, PW: Word>(
channel: Peri<'a, impl Channel>,
request: Request,
buf: &'a [MW],
peri_addr: *mut PW,
options: TransferOptions,
) -> Self {
Self::new_write_raw(channel, request, buf, peri_addr, options)
}
/// Create a new write DMA transfer (memory to peripheral), using raw pointers.
pub unsafe fn new_write_raw<MW: Word, PW: Word>(
channel: Peri<'a, impl Channel>,
request: Request,
buf: *const [MW],
peri_addr: *mut PW,
options: TransferOptions,
) -> Self {
Self::new_inner(
channel.into(),
request,
Dir::MemoryToPeripheral,
peri_addr as *const u32,
buf as *const MW as *mut u32,
buf.len(),
true,
MW::size(),
PW::size(),
options,
)
}
/// Create a new write DMA transfer (memory to peripheral), writing the same value repeatedly.
pub unsafe fn new_write_repeated<MW: Word, PW: Word>(
channel: Peri<'a, impl Channel>,
request: Request,
repeated: &'a MW,
count: usize,
peri_addr: *mut PW,
options: TransferOptions,
) -> Self {
Self::new_inner(
channel.into(),
request,
Dir::MemoryToPeripheral,
peri_addr as *const u32,
repeated as *const MW as *mut u32,
count,
false,
MW::size(),
PW::size(),
options,
)
}
unsafe fn new_inner(
channel: Peri<'a, AnyChannel>,
request: Request,
dir: Dir,
peri_addr: *const u32,
mem_addr: *mut u32,
mem_len: usize,
incr_mem: bool,
data_size: WordSize,
dst_size: WordSize,
_options: TransferOptions,
) -> Self {
// BNDT is specified as bytes, not as number of transfers.
let Ok(bndt) = (mem_len * data_size.bytes()).try_into() else {
panic!("DMA transfers may not be larger than 65535 bytes.");
};
let info = channel.info();
let ch = info.dma.ch(info.num);
// "Preceding reads and writes cannot be moved past subsequent writes."
fence(Ordering::SeqCst);
let this = Self { channel };
ch.cr().write(|w| w.set_reset(true));
ch.fcr().write(|w| w.0 = 0xFFFF_FFFF); // clear all irqs
ch.llr().write(|_| {}); // no linked list
ch.tr1().write(|w| {
w.set_sdw(data_size.into());
w.set_ddw(dst_size.into());
w.set_sinc(dir == Dir::MemoryToPeripheral && incr_mem);
w.set_dinc(dir == Dir::PeripheralToMemory && incr_mem);
});
ch.tr2().write(|w| {
w.set_dreq(match dir {
Dir::MemoryToPeripheral => vals::Dreq::DESTINATION_PERIPHERAL,
Dir::PeripheralToMemory => vals::Dreq::SOURCE_PERIPHERAL,
});
w.set_reqsel(request);
});
ch.tr3().write(|_| {}); // no address offsets.
ch.br1().write(|w| w.set_bndt(bndt));
match dir {
Dir::MemoryToPeripheral => {
ch.sar().write_value(mem_addr as _);
ch.dar().write_value(peri_addr as _);
}
Dir::PeripheralToMemory => {
ch.sar().write_value(peri_addr as _);
ch.dar().write_value(mem_addr as _);
}
}
ch.cr().write(|w| {
// Enable interrupts
w.set_tcie(true);
w.set_useie(true);
w.set_dteie(true);
w.set_suspie(true);
// Start it
w.set_en(true);
});
this
}
/// Request the transfer to stop.
///
/// This doesn't immediately stop the transfer, you have to wait until [`is_running`](Self::is_running) returns false.
pub fn request_stop(&mut self) {
let info = self.channel.info();
let ch = info.dma.ch(info.num);
ch.cr().modify(|w| w.set_susp(true))
}
/// Return whether this transfer is still running.
///
/// If this returns `false`, it can be because either the transfer finished, or
/// it was requested to stop early with [`request_stop`](Self::request_stop).
pub fn is_running(&mut self) -> bool {
let info = self.channel.info();
let ch = info.dma.ch(info.num);
let sr = ch.sr().read();
!sr.tcf() && !sr.suspf()
}
/// Gets the total remaining transfers for the channel
/// Note: this will be zero for transfers that completed without cancellation.
pub fn get_remaining_transfers(&self) -> u16 {
let info = self.channel.info();
let ch = info.dma.ch(info.num);
ch.br1().read().bndt()
}
/// Blocking wait until the transfer finishes.
pub fn blocking_wait(mut self) {
while self.is_running() {}
// "Subsequent reads and writes cannot be moved ahead of preceding reads."
fence(Ordering::SeqCst);
core::mem::forget(self);
}
}
impl<'a> Drop for Transfer<'a> {
fn drop(&mut self) {
self.request_stop();
while self.is_running() {}
// "Subsequent reads and writes cannot be moved ahead of preceding reads."
fence(Ordering::SeqCst);
}
}
impl<'a> Unpin for Transfer<'a> {}
impl<'a> Future for Transfer<'a> {
type Output = ();
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let state = &STATE[self.channel.id as usize];
state.waker.register(cx.waker());
if self.is_running() {
Poll::Pending
} else {
Poll::Ready(())
}
}
}

View File

@ -0,0 +1,267 @@
//! Implementation of the GPDMA linked list and linked list items.
#![macro_use]
use stm32_metapac::gpdma::regs;
use stm32_metapac::gpdma::vals::Dreq;
use crate::dma::word::{Word, WordSize};
use crate::dma::{Dir, Request};
/// The mode in which to run the linked list.
#[derive(Debug)]
pub enum RunMode {
/// List items are not linked together.
Unlinked,
/// The list is linked sequentially and only run once.
Once,
/// The list is linked sequentially, and the end of the list is linked to the beginning.
Circular,
}
/// A linked-list item for linear GPDMA transfers.
///
/// Also works for 2D-capable GPDMA channels, but does not use 2D capabilities.
#[derive(Debug, Copy, Clone, Default)]
#[repr(C)]
pub struct LinearItem {
/// Transfer register 1.
pub tr1: regs::ChTr1,
/// Transfer register 2.
pub tr2: regs::ChTr2,
/// Block register 2.
pub br1: regs::ChBr1,
/// Source address register.
pub sar: u32,
/// Destination address register.
pub dar: u32,
/// Linked-list address register.
pub llr: regs::ChLlr,
}
impl LinearItem {
/// Create a new read DMA transfer (peripheral to memory).
pub unsafe fn new_read<'d, W: Word>(request: Request, peri_addr: *mut W, buf: &'d mut [W]) -> Self {
Self::new_inner(
request,
Dir::PeripheralToMemory,
peri_addr as *const u32,
buf as *mut [W] as *mut W as *mut u32,
buf.len(),
true,
W::size(),
W::size(),
)
}
/// Create a new write DMA transfer (memory to peripheral).
pub unsafe fn new_write<'d, MW: Word, PW: Word>(request: Request, buf: &'d [MW], peri_addr: *mut PW) -> Self {
Self::new_inner(
request,
Dir::MemoryToPeripheral,
peri_addr as *const u32,
buf as *const [MW] as *const MW as *mut u32,
buf.len(),
true,
MW::size(),
PW::size(),
)
}
unsafe fn new_inner(
request: Request,
dir: Dir,
peri_addr: *const u32,
mem_addr: *mut u32,
mem_len: usize,
incr_mem: bool,
data_size: WordSize,
dst_size: WordSize,
) -> Self {
// BNDT is specified as bytes, not as number of transfers.
let Ok(bndt) = (mem_len * data_size.bytes()).try_into() else {
panic!("DMA transfers may not be larger than 65535 bytes.");
};
let mut br1 = regs::ChBr1(0);
br1.set_bndt(bndt);
let mut tr1 = regs::ChTr1(0);
tr1.set_sdw(data_size.into());
tr1.set_ddw(dst_size.into());
tr1.set_sinc(dir == Dir::MemoryToPeripheral && incr_mem);
tr1.set_dinc(dir == Dir::PeripheralToMemory && incr_mem);
let mut tr2 = regs::ChTr2(0);
tr2.set_dreq(match dir {
Dir::MemoryToPeripheral => Dreq::DESTINATION_PERIPHERAL,
Dir::PeripheralToMemory => Dreq::SOURCE_PERIPHERAL,
});
tr2.set_reqsel(request);
let (sar, dar) = match dir {
Dir::MemoryToPeripheral => (mem_addr as _, peri_addr as _),
Dir::PeripheralToMemory => (peri_addr as _, mem_addr as _),
};
let llr = regs::ChLlr(0);
Self {
tr1,
tr2,
br1,
sar,
dar,
llr,
}
}
/// Link to the next linear item at the given address.
///
/// Enables channel update bits.
fn link_to(&mut self, next: u16) {
let mut llr = regs::ChLlr(0);
llr.set_ut1(true);
llr.set_ut2(true);
llr.set_ub1(true);
llr.set_usa(true);
llr.set_uda(true);
llr.set_ull(true);
// Lower two bits are ignored: 32 bit aligned.
llr.set_la(next >> 2);
self.llr = llr;
}
/// Unlink the next linear item.
///
/// Disables channel update bits.
fn unlink(&mut self) {
self.llr = regs::ChLlr(0);
}
/// The item's transfer count in number of words.
fn transfer_count(&self) -> usize {
let word_size: WordSize = self.tr1.ddw().into();
self.br1.bndt() as usize / word_size.bytes()
}
}
/// A table of linked list items.
#[repr(C)]
pub struct Table<const ITEM_COUNT: usize> {
/// The items.
pub items: [LinearItem; ITEM_COUNT],
}
impl<const ITEM_COUNT: usize> Table<ITEM_COUNT> {
/// Create a new table.
pub fn new(items: [LinearItem; ITEM_COUNT]) -> Self {
assert!(!items.is_empty());
Self { items }
}
/// Create a ping-pong linked-list table.
///
/// This uses two linked-list items, one for each half of the buffer.
pub unsafe fn new_ping_pong<W: Word>(
request: Request,
peri_addr: *mut W,
buffer: &mut [W],
direction: Dir,
) -> Table<2> {
// Buffer halves should be the same length.
let half_len = buffer.len() / 2;
assert_eq!(half_len * 2, buffer.len());
let items = match direction {
Dir::MemoryToPeripheral => [
LinearItem::new_write(request, &mut buffer[..half_len], peri_addr),
LinearItem::new_write(request, &mut buffer[half_len..], peri_addr),
],
Dir::PeripheralToMemory => [
LinearItem::new_read(request, peri_addr, &mut buffer[..half_len]),
LinearItem::new_read(request, peri_addr, &mut buffer[half_len..]),
],
};
Table::new(items)
}
/// Link the table as given by the run mode.
pub fn link(&mut self, run_mode: RunMode) {
if matches!(run_mode, RunMode::Once | RunMode::Circular) {
self.link_sequential();
}
if matches!(run_mode, RunMode::Circular) {
self.link_repeat();
}
}
/// The number of linked list items.
pub fn len(&self) -> usize {
self.items.len()
}
/// The total transfer count of the table in number of words.
pub fn transfer_count(&self) -> usize {
let mut count = 0;
for item in self.items {
count += item.transfer_count() as usize
}
count
}
/// Link items of given indices together: first -> second.
pub fn link_indices(&mut self, first: usize, second: usize) {
assert!(first < self.len());
assert!(second < self.len());
let second_item = self.offset_address(second);
self.items[first].link_to(second_item);
}
/// Link items sequentially.
pub fn link_sequential(&mut self) {
if self.len() > 1 {
for index in 0..(self.items.len() - 1) {
let next = self.offset_address(index + 1);
self.items[index].link_to(next);
}
}
}
/// Link last to first item.
pub fn link_repeat(&mut self) {
let first_address = self.offset_address(0);
self.items.last_mut().unwrap().link_to(first_address);
}
/// Unlink all items.
pub fn unlink(&mut self) {
for item in self.items.iter_mut() {
item.unlink();
}
}
/// Linked list base address (upper 16 address bits).
pub fn base_address(&self) -> u16 {
((&raw const self.items as u32) >> 16) as _
}
/// Linked list offset address (lower 16 address bits) at the selected index.
pub fn offset_address(&self, index: usize) -> u16 {
assert!(self.items.len() > index);
let address = &raw const self.items[index] as _;
// Ensure 32 bit address alignment.
assert_eq!(address & 0b11, 0);
address
}
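// Worked example with an illustrative address (not taken from the diff): if `items` sits at
// 0x2000_0124, base_address() returns 0x2000 (the upper 16 bits, programmed into the channel's
// LBAR register) and offset_address(0) returns 0x0124 (the lower 16 bits). Items are 32-bit
// aligned, so link_to() and the channel setup store the offset shifted right by two in the LA
// field: 0x0124 >> 2 = 0x49.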
}
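A hedged usage sketch of the table API above, with `request`, `peri_addr` and `buf` as placeholders for a real DMA request, peripheral data register and RX buffer:

// Two linked items, one per half of the buffer, chained into a circle for continuous reception.
let mut table = unsafe { Table::<2>::new_ping_pong(request, peri_addr, &mut buf, Dir::PeripheralToMemory) };
table.link(RunMode::Circular); // the second item links back to the first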

View File

@ -0,0 +1,699 @@
#![macro_use]
use core::future::Future;
use core::pin::Pin;
use core::sync::atomic::{fence, AtomicUsize, Ordering};
use core::task::{Context, Poll};
use embassy_hal_internal::Peri;
use embassy_sync::waitqueue::AtomicWaker;
use linked_list::Table;
use super::word::{Word, WordSize};
use super::{AnyChannel, Channel, Dir, Request, STATE};
use crate::interrupt::typelevel::Interrupt;
use crate::pac;
use crate::pac::gpdma::vals;
pub mod linked_list;
pub mod ringbuffered;
pub(crate) struct ChannelInfo {
pub(crate) dma: pac::gpdma::Gpdma,
pub(crate) num: usize,
#[cfg(feature = "_dual-core")]
pub(crate) irq: pac::Interrupt,
}
/// DMA request priority
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub enum Priority {
/// Low Priority
Low,
/// Medium Priority
Medium,
/// High Priority
High,
/// Very High Priority
VeryHigh,
}
impl From<Priority> for pac::gpdma::vals::Prio {
fn from(value: Priority) -> Self {
match value {
Priority::Low => pac::gpdma::vals::Prio::LOW_WITH_LOWH_WEIGHT,
Priority::Medium => pac::gpdma::vals::Prio::LOW_WITH_MID_WEIGHT,
Priority::High => pac::gpdma::vals::Prio::LOW_WITH_HIGH_WEIGHT,
Priority::VeryHigh => pac::gpdma::vals::Prio::HIGH,
}
}
}
/// GPDMA transfer options.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[non_exhaustive]
pub struct TransferOptions {
/// Request priority level.
pub priority: Priority,
/// Enable half transfer interrupt.
pub half_transfer_ir: bool,
/// Enable transfer complete interrupt.
pub complete_transfer_ir: bool,
}
impl Default for TransferOptions {
fn default() -> Self {
Self {
priority: Priority::VeryHigh,
half_transfer_ir: false,
complete_transfer_ir: true,
}
}
}
impl From<WordSize> for vals::Dw {
fn from(raw: WordSize) -> Self {
match raw {
WordSize::OneByte => Self::BYTE,
WordSize::TwoBytes => Self::HALF_WORD,
WordSize::FourBytes => Self::WORD,
}
}
}
impl From<vals::Dw> for WordSize {
fn from(raw: vals::Dw) -> Self {
match raw {
vals::Dw::BYTE => Self::OneByte,
vals::Dw::HALF_WORD => Self::TwoBytes,
vals::Dw::WORD => Self::FourBytes,
_ => panic!("Invalid word size"),
}
}
}
pub(crate) struct LLiState {
/// The number of linked-list items.
count: AtomicUsize,
/// The index of the current linked-list item.
index: AtomicUsize,
/// The total transfer count of all linked-list items in number of words.
transfer_count: AtomicUsize,
}
pub(crate) struct ChannelState {
waker: AtomicWaker,
complete_count: AtomicUsize,
lli_state: LLiState,
}
impl ChannelState {
pub(crate) const NEW: Self = Self {
waker: AtomicWaker::new(),
complete_count: AtomicUsize::new(0),
lli_state: LLiState {
count: AtomicUsize::new(0),
index: AtomicUsize::new(0),
transfer_count: AtomicUsize::new(0),
},
};
}
/// safety: must be called only once
pub(crate) unsafe fn init(cs: critical_section::CriticalSection, irq_priority: crate::interrupt::Priority) {
foreach_interrupt! {
($peri:ident, gpdma, $block:ident, $signal_name:ident, $irq:ident) => {
crate::interrupt::typelevel::$irq::set_priority_with_cs(cs, irq_priority);
#[cfg(not(feature = "_dual-core"))]
crate::interrupt::typelevel::$irq::enable();
};
}
crate::_generated::init_gpdma();
}
impl AnyChannel {
/// Safety: Must be called with a matching set of parameters for a valid dma channel
pub(crate) unsafe fn on_irq(&self) {
let info = self.info();
#[cfg(feature = "_dual-core")]
{
use embassy_hal_internal::interrupt::InterruptExt as _;
info.irq.enable();
}
let state = &STATE[self.id as usize];
let ch = info.dma.ch(info.num);
let sr = ch.sr().read();
if sr.dtef() {
panic!(
"DMA: data transfer error on DMA@{:08x} channel {}",
info.dma.as_ptr() as u32,
info.num
);
}
if sr.usef() {
panic!(
"DMA: user settings error on DMA@{:08x} channel {}",
info.dma.as_ptr() as u32,
info.num
);
}
if sr.ulef() {
panic!(
"DMA: link transfer error on DMA@{:08x} channel {}",
info.dma.as_ptr() as u32,
info.num
);
}
if sr.htf() {
ch.fcr().write(|w| w.set_htf(true));
}
if sr.tcf() {
ch.fcr().write(|w| w.set_tcf(true));
let lli_count = state.lli_state.count.load(Ordering::Acquire);
let complete = if lli_count > 0 {
let next_lli_index = state.lli_state.index.load(Ordering::Acquire) + 1;
let complete = next_lli_index >= lli_count;
state
.lli_state
.index
.store(if complete { 0 } else { next_lli_index }, Ordering::Release);
complete
} else {
true
};
if complete {
state.complete_count.fetch_add(1, Ordering::Release);
}
}
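// Walk-through with illustrative numbers: the handler assumes TCF fires once per linked item.
// For a 2-item circular table, the first TCF moves the index from 0 to 1; the second sees the
// next index (2) reach the count, wraps the index back to 0 and bumps complete_count. So
// complete_count counts full passes over the whole table, or individual transfers when no
// linked list is in use.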
if sr.suspf() {
// Disable all xxIEs to prevent the irq from firing again.
ch.cr().write(|_| {});
}
state.waker.wake();
}
fn get_remaining_transfers(&self) -> u16 {
let info = self.info();
let ch = info.dma.ch(info.num);
let word_size: WordSize = ch.tr1().read().ddw().into();
ch.br1().read().bndt() / word_size.bytes() as u16
}
unsafe fn configure(
&self,
request: Request,
dir: Dir,
peri_addr: *const u32,
mem_addr: *mut u32,
mem_len: usize,
incr_mem: bool,
data_size: WordSize,
dst_size: WordSize,
options: TransferOptions,
) {
// BNDT is specified as bytes, not as number of transfers.
let Ok(bndt) = (mem_len * data_size.bytes()).try_into() else {
panic!("DMA transfers may not be larger than 65535 bytes.");
};
let info = self.info();
let ch = info.dma.ch(info.num);
// "Preceding reads and writes cannot be moved past subsequent writes."
fence(Ordering::SeqCst);
ch.cr().write(|w| w.set_reset(true));
ch.fcr().write(|w| {
// Clear all irqs
w.set_dtef(true);
w.set_htf(true);
w.set_suspf(true);
w.set_tcf(true);
w.set_tof(true);
w.set_ulef(true);
w.set_usef(true);
});
ch.llr().write(|_| {}); // no linked list
ch.tr1().write(|w| {
w.set_sdw(data_size.into());
w.set_ddw(dst_size.into());
w.set_sinc(dir == Dir::MemoryToPeripheral && incr_mem);
w.set_dinc(dir == Dir::PeripheralToMemory && incr_mem);
});
ch.tr2().write(|w| {
w.set_dreq(match dir {
Dir::MemoryToPeripheral => vals::Dreq::DESTINATION_PERIPHERAL,
Dir::PeripheralToMemory => vals::Dreq::SOURCE_PERIPHERAL,
});
w.set_reqsel(request);
});
ch.tr3().write(|_| {}); // no address offsets.
ch.br1().write(|w| w.set_bndt(bndt));
match dir {
Dir::MemoryToPeripheral => {
ch.sar().write_value(mem_addr as _);
ch.dar().write_value(peri_addr as _);
}
Dir::PeripheralToMemory => {
ch.sar().write_value(peri_addr as _);
ch.dar().write_value(mem_addr as _);
}
}
ch.cr().write(|w| {
w.set_prio(options.priority.into());
w.set_htie(options.half_transfer_ir);
w.set_tcie(options.complete_transfer_ir);
w.set_useie(true);
w.set_dteie(true);
w.set_suspie(true);
});
let state = &STATE[self.id as usize];
state.lli_state.count.store(0, Ordering::Relaxed);
state.lli_state.index.store(0, Ordering::Relaxed);
state.lli_state.transfer_count.store(0, Ordering::Relaxed)
}
/// Configure a linked-list transfer.
unsafe fn configure_linked_list<const ITEM_COUNT: usize>(
&self,
table: &Table<ITEM_COUNT>,
options: TransferOptions,
) {
let info = self.info();
let ch = info.dma.ch(info.num);
// "Preceding reads and writes cannot be moved past subsequent writes."
fence(Ordering::SeqCst);
ch.cr().write(|w| w.set_reset(true));
ch.fcr().write(|w| {
// Clear all irqs
w.set_dtef(true);
w.set_htf(true);
w.set_suspf(true);
w.set_tcf(true);
w.set_tof(true);
w.set_ulef(true);
w.set_usef(true);
});
ch.lbar().write(|reg| reg.set_lba(table.base_address()));
// Empty LLI0.
ch.br1().write(|w| w.set_bndt(0));
// Enable all linked-list field updates.
ch.llr().write(|w| {
w.set_ut1(true);
w.set_ut2(true);
w.set_ub1(true);
w.set_usa(true);
w.set_uda(true);
w.set_ull(true);
// Lower two bits are ignored: 32 bit aligned.
w.set_la(table.offset_address(0) >> 2);
});
ch.tr3().write(|_| {}); // no address offsets.
ch.cr().write(|w| {
w.set_prio(options.priority.into());
w.set_htie(options.half_transfer_ir);
w.set_tcie(options.complete_transfer_ir);
w.set_useie(true);
w.set_uleie(true);
w.set_dteie(true);
w.set_suspie(true);
});
let state = &STATE[self.id as usize];
state.lli_state.count.store(ITEM_COUNT, Ordering::Relaxed);
state.lli_state.index.store(0, Ordering::Relaxed);
state
.lli_state
.transfer_count
.store(table.transfer_count(), Ordering::Relaxed)
}
fn start(&self) {
let info = self.info();
let ch = info.dma.ch(info.num);
ch.cr().modify(|w| w.set_en(true));
}
fn request_pause(&self) {
let info = self.info();
let ch = info.dma.ch(info.num);
ch.cr().modify(|w| w.set_susp(true))
}
fn request_resume(&self) {
let info = self.info();
let ch = info.dma.ch(info.num);
ch.cr().modify(|w| w.set_susp(false));
}
fn request_reset(&self) {
let info = self.info();
let ch = info.dma.ch(info.num);
self.request_pause();
while self.is_running() {}
ch.cr().modify(|w| w.set_reset(true));
}
fn is_running(&self) -> bool {
let info = self.info();
let ch = info.dma.ch(info.num);
let sr = ch.sr().read();
!sr.suspf() && !sr.idlef()
}
fn poll_stop(&self) -> Poll<()> {
use core::sync::atomic::compiler_fence;
compiler_fence(Ordering::SeqCst);
if !self.is_running() {
Poll::Ready(())
} else {
Poll::Pending
}
}
}
/// Linked-list DMA transfer.
#[must_use = "futures do nothing unless you `.await` or poll them"]
pub struct LinkedListTransfer<'a, const ITEM_COUNT: usize> {
channel: Peri<'a, AnyChannel>,
}
impl<'a, const ITEM_COUNT: usize> LinkedListTransfer<'a, ITEM_COUNT> {
/// Create a new linked-list transfer.
pub unsafe fn new_linked_list<const N: usize>(
channel: Peri<'a, impl Channel>,
table: Table<ITEM_COUNT>,
options: TransferOptions,
) -> Self {
Self::new_inner_linked_list(channel.into(), table, options)
}
unsafe fn new_inner_linked_list(
channel: Peri<'a, AnyChannel>,
table: Table<ITEM_COUNT>,
options: TransferOptions,
) -> Self {
channel.configure_linked_list(&table, options);
channel.start();
Self { channel }
}
/// Request the transfer to pause, keeping the existing configuration for this channel.
///
/// To resume the transfer, call [`request_resume`](Self::request_resume) again.
/// This doesn't immediately stop the transfer, you have to wait until [`is_running`](Self::is_running) returns false.
pub fn request_pause(&mut self) {
self.channel.request_pause()
}
/// Request the transfer to resume after having been paused.
pub fn request_resume(&mut self) {
self.channel.request_resume()
}
/// Request the DMA to reset.
///
/// The configuration for this channel will **not be preserved**. If you need to restart the transfer
/// at a later point with the same configuration, see [`request_pause`](Self::request_pause) instead.
pub fn request_reset(&mut self) {
self.channel.request_reset()
}
/// Return whether this transfer is still running.
///
/// If this returns `false`, it can be because either the transfer finished, or
/// it was requested to stop early with [`request_pause`](Self::request_pause).
pub fn is_running(&mut self) -> bool {
self.channel.is_running()
}
/// Gets the total remaining transfers for the channel
/// Note: this will be zero for transfers that completed without cancellation.
pub fn get_remaining_transfers(&self) -> u16 {
self.channel.get_remaining_transfers()
}
/// Blocking wait until the transfer finishes.
pub fn blocking_wait(mut self) {
while self.is_running() {}
// "Subsequent reads and writes cannot be moved ahead of preceding reads."
fence(Ordering::SeqCst);
core::mem::forget(self);
}
}
impl<'a, const ITEM_COUNT: usize> Drop for LinkedListTransfer<'a, ITEM_COUNT> {
fn drop(&mut self) {
self.request_reset();
// "Subsequent reads and writes cannot be moved ahead of preceding reads."
fence(Ordering::SeqCst);
}
}
impl<'a, const ITEM_COUNT: usize> Unpin for LinkedListTransfer<'a, ITEM_COUNT> {}
impl<'a, const ITEM_COUNT: usize> Future for LinkedListTransfer<'a, ITEM_COUNT> {
type Output = ();
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let state = &STATE[self.channel.id as usize];
state.waker.register(cx.waker());
if self.is_running() {
Poll::Pending
} else {
Poll::Ready(())
}
}
}
/// DMA transfer.
#[must_use = "futures do nothing unless you `.await` or poll them"]
pub struct Transfer<'a> {
channel: Peri<'a, AnyChannel>,
}
impl<'a> Transfer<'a> {
/// Create a new read DMA transfer (peripheral to memory).
pub unsafe fn new_read<W: Word>(
channel: Peri<'a, impl Channel>,
request: Request,
peri_addr: *mut W,
buf: &'a mut [W],
options: TransferOptions,
) -> Self {
Self::new_read_raw(channel, request, peri_addr, buf, options)
}
/// Create a new read DMA transfer (peripheral to memory), using raw pointers.
pub unsafe fn new_read_raw<MW: Word, PW: Word>(
channel: Peri<'a, impl Channel>,
request: Request,
peri_addr: *mut PW,
buf: *mut [MW],
options: TransferOptions,
) -> Self {
Self::new_inner(
channel.into(),
request,
Dir::PeripheralToMemory,
peri_addr as *const u32,
buf as *mut MW as *mut u32,
buf.len(),
true,
PW::size(),
MW::size(),
options,
)
}
/// Create a new write DMA transfer (memory to peripheral).
pub unsafe fn new_write<MW: Word, PW: Word>(
channel: Peri<'a, impl Channel>,
request: Request,
buf: &'a [MW],
peri_addr: *mut PW,
options: TransferOptions,
) -> Self {
Self::new_write_raw(channel, request, buf, peri_addr, options)
}
/// Create a new write DMA transfer (memory to peripheral), using raw pointers.
pub unsafe fn new_write_raw<MW: Word, PW: Word>(
channel: Peri<'a, impl Channel>,
request: Request,
buf: *const [MW],
peri_addr: *mut PW,
options: TransferOptions,
) -> Self {
Self::new_inner(
channel.into(),
request,
Dir::MemoryToPeripheral,
peri_addr as *const u32,
buf as *const MW as *mut u32,
buf.len(),
true,
MW::size(),
PW::size(),
options,
)
}
/// Create a new write DMA transfer (memory to peripheral), writing the same value repeatedly.
pub unsafe fn new_write_repeated<MW: Word, PW: Word>(
channel: Peri<'a, impl Channel>,
request: Request,
repeated: &'a MW,
count: usize,
peri_addr: *mut PW,
options: TransferOptions,
) -> Self {
Self::new_inner(
channel.into(),
request,
Dir::MemoryToPeripheral,
peri_addr as *const u32,
repeated as *const MW as *mut u32,
count,
false,
MW::size(),
PW::size(),
options,
)
}
unsafe fn new_inner(
channel: Peri<'a, AnyChannel>,
request: Request,
dir: Dir,
peri_addr: *const u32,
mem_addr: *mut u32,
mem_len: usize,
incr_mem: bool,
data_size: WordSize,
peripheral_size: WordSize,
options: TransferOptions,
) -> Self {
assert!(mem_len > 0 && mem_len <= 0xFFFF);
channel.configure(
request,
dir,
peri_addr,
mem_addr,
mem_len,
incr_mem,
data_size,
peripheral_size,
options,
);
channel.start();
Self { channel }
}
/// Request the transfer to pause, keeping the existing configuration for this channel.
/// To restart the transfer, call [`start`](Self::start) again.
///
/// This doesn't immediately stop the transfer, you have to wait until [`is_running`](Self::is_running) returns false.
pub fn request_pause(&mut self) {
self.channel.request_pause()
}
/// Request the transfer to resume after being suspended.
pub fn request_resume(&mut self) {
self.channel.request_resume()
}
/// Request the DMA to reset.
///
/// The configuration for this channel will **not be preserved**. If you need to restart the transfer
/// at a later point with the same configuration, see [`request_pause`](Self::request_pause) instead.
pub fn request_reset(&mut self) {
self.channel.request_reset()
}
/// Return whether this transfer is still running.
///
/// If this returns `false`, it can be because either the transfer finished, or
/// it was requested to stop early with [`request_pause`](Self::request_pause).
pub fn is_running(&mut self) -> bool {
self.channel.is_running()
}
/// Gets the total remaining transfers for the channel
/// Note: this will be zero for transfers that completed without cancellation.
pub fn get_remaining_transfers(&self) -> u16 {
self.channel.get_remaining_transfers()
}
/// Blocking wait until the transfer finishes.
pub fn blocking_wait(mut self) {
while self.is_running() {}
// "Subsequent reads and writes cannot be moved ahead of preceding reads."
fence(Ordering::SeqCst);
core::mem::forget(self);
}
}
impl<'a> Drop for Transfer<'a> {
fn drop(&mut self) {
self.request_pause();
while self.is_running() {}
// "Subsequent reads and writes cannot be moved ahead of preceding reads."
fence(Ordering::SeqCst);
}
}
impl<'a> Unpin for Transfer<'a> {}
impl<'a> Future for Transfer<'a> {
type Output = ();
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let state = &STATE[self.channel.id as usize];
state.waker.register(cx.waker());
if self.is_running() {
Poll::Pending
} else {
Poll::Ready(())
}
}
}
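// Illustrative usage sketch (not part of the original file), relying only on the
// items defined or imported in this module: a one-shot peripheral-to-memory
// transfer that is awaited to completion. The request number, data-register
// pointer and options are placeholders for whatever peripheral the channel is
// actually wired to.
async fn example_read<'d>(
channel: Peri<'d, impl Channel>,
request: Request,
peri_dr: *mut u8,
buf: &'d mut [u8],
options: TransferOptions,
) {
// SAFETY: the caller must pass the data-register address matching `request`
// and keep the peripheral's DMA requests enabled for the duration.
let transfer = unsafe { Transfer::new_read(channel, request, peri_dr, buf, options) };
// `Transfer` implements `Future`, so it can simply be awaited; it resolves
// once the channel reports that it is no longer running.
transfer.await;
}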

View File

@ -0,0 +1,332 @@
//! GPDMA ring buffer implementation.
//!
//! FIXME: Add request_pause functionality?
//! FIXME: Stop the DMA, if a user does not queue new transfers (chain of linked-list items ends automatically).
use core::future::poll_fn;
use core::sync::atomic::{fence, Ordering};
use core::task::Waker;
use embassy_hal_internal::Peri;
use super::{AnyChannel, TransferOptions, STATE};
use crate::dma::gpdma::linked_list::{RunMode, Table};
use crate::dma::ringbuffer::{DmaCtrl, Error, ReadableDmaRingBuffer, WritableDmaRingBuffer};
use crate::dma::word::Word;
use crate::dma::{Channel, Dir, Request};
struct DmaCtrlImpl<'a>(Peri<'a, AnyChannel>);
impl<'a> DmaCtrl for DmaCtrlImpl<'a> {
fn get_remaining_transfers(&self) -> usize {
let state = &STATE[self.0.id as usize];
let current_remaining = self.0.get_remaining_transfers() as usize;
let lli_count = state.lli_state.count.load(Ordering::Acquire);
if lli_count > 0 {
// In linked-list mode, the remaining transfers are the sum of the full lengths of LLIs that follow,
// and the remaining transfers for the current LLI.
let lli_index = state.lli_state.index.load(Ordering::Acquire);
let single_transfer_count = state.lli_state.transfer_count.load(Ordering::Acquire) / lli_count;
(lli_count - lli_index - 1) * single_transfer_count + current_remaining
} else {
// No linked-list mode.
current_remaining
}
}
fn reset_complete_count(&mut self) -> usize {
let state = &STATE[self.0.id as usize];
state.complete_count.swap(0, Ordering::AcqRel)
}
fn set_waker(&mut self, waker: &Waker) {
STATE[self.0.id as usize].waker.register(waker);
}
}
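// Worked example of the arithmetic above (numbers are illustrative): a ping-pong
// table with `lli_count = 2` items covering `transfer_count = 512` words gives
// 256 words per item. If the DMA is on item 0 (`lli_index = 0`) with 100 words
// still pending in it, the total remaining is (2 - 0 - 1) * 256 + 100 = 356 words.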
/// Ringbuffer for receiving data using GPDMA linked-list mode.
pub struct ReadableRingBuffer<'a, W: Word> {
channel: Peri<'a, AnyChannel>,
ringbuf: ReadableDmaRingBuffer<'a, W>,
table: Table<2>,
options: TransferOptions,
}
impl<'a, W: Word> ReadableRingBuffer<'a, W> {
/// Create a new ring buffer.
///
/// Transfer options are applied to the individual linked list items.
pub unsafe fn new(
channel: Peri<'a, impl Channel>,
request: Request,
peri_addr: *mut W,
buffer: &'a mut [W],
options: TransferOptions,
) -> Self {
let channel: Peri<'a, AnyChannel> = channel.into();
let table = Table::<2>::new_ping_pong::<W>(request, peri_addr, buffer, Dir::PeripheralToMemory);
Self {
channel,
ringbuf: ReadableDmaRingBuffer::new(buffer),
table,
options,
}
}
/// Start the ring buffer operation.
pub fn start(&mut self) {
// Apply the default configuration to the channel.
unsafe { self.channel.configure_linked_list(&self.table, self.options) };
self.table.link(RunMode::Circular);
self.channel.start();
}
/// Clear all data in the ring buffer.
pub fn clear(&mut self) {
self.ringbuf.reset(&mut DmaCtrlImpl(self.channel.reborrow()));
}
/// Read elements from the ring buffer.
/// Returns a tuple of the length read and the length remaining in the buffer.
/// If not all of the elements were read, some elements remain in the buffer.
/// The length remaining is the capacity, `ring_buf.len()`, less the elements remaining after the read.
/// An error is returned if the portion to be read was overwritten by the DMA controller.
pub fn read(&mut self, buf: &mut [W]) -> Result<(usize, usize), Error> {
self.ringbuf.read(&mut DmaCtrlImpl(self.channel.reborrow()), buf)
}
/// Read an exact number of elements from the ringbuffer.
///
/// Returns the remaining number of elements available for immediate reading.
/// Error is returned if the portion to be read was overwritten by the DMA controller.
///
/// Async/Wake Behavior:
/// The underlying DMA peripheral only can wake us when its buffer pointer has reached the halfway point,
/// and when it wraps around. This means that when called with a buffer of length 'M', when this
/// ring buffer was created with a buffer of size 'N':
/// - If M equals N/2 or N/2 divides evenly into M, this function will return every N/2 elements read on the DMA source.
/// - Otherwise, this function may need up to N/2 extra elements to arrive before returning.
pub async fn read_exact(&mut self, buffer: &mut [W]) -> Result<usize, Error> {
self.ringbuf
.read_exact(&mut DmaCtrlImpl(self.channel.reborrow()), buffer)
.await
}
/// The current length of the ringbuffer
pub fn len(&mut self) -> Result<usize, Error> {
Ok(self.ringbuf.len(&mut DmaCtrlImpl(self.channel.reborrow()))?)
}
/// The capacity of the ringbuffer
pub const fn capacity(&self) -> usize {
self.ringbuf.cap()
}
/// Set a waker to be woken when at least one byte is received.
pub fn set_waker(&mut self, waker: &Waker) {
DmaCtrlImpl(self.channel.reborrow()).set_waker(waker);
}
/// Request the transfer to pause, keeping the existing configuration for this channel.
///
/// To resume the transfer, call [`request_resume`](Self::request_resume).
/// This doesn't immediately stop the transfer, you have to wait until [`is_running`](Self::is_running) returns false.
pub fn request_pause(&mut self) {
self.channel.request_pause()
}
/// Request the transfer to resume after having been paused.
pub fn request_resume(&mut self) {
self.channel.request_resume()
}
/// Request the DMA to reset.
///
/// The configuration for this channel will **not be preserved**. If you need to restart the transfer
/// at a later point with the same configuration, see [`request_pause`](Self::request_pause) instead.
pub fn request_reset(&mut self) {
self.channel.request_reset()
}
/// Return whether this transfer is still running.
///
/// If this returns `false`, it can be because either the transfer finished, or
/// it was requested to stop early with [`request_pause`](Self::request_pause).
pub fn is_running(&mut self) -> bool {
self.channel.is_running()
}
/// Stop the DMA transfer and await until the buffer is full.
///
/// This disables the DMA transfer's circular mode so that the transfer
/// stops when the buffer is full.
///
/// This is designed to be used with streaming input data such as the
/// I2S/SAI or ADC.
pub async fn stop(&mut self) {
// wait until cr.susp reads as true
poll_fn(|cx| {
self.set_waker(cx.waker());
self.channel.poll_stop()
})
.await
}
}
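// Illustrative usage sketch (not part of the original file), using only the API
// defined above: continuously drain a ring buffer fed by a peripheral. Per the
// note on `read_exact`, chunks of half the ring capacity (or a divisor of it)
// wake with the least added latency.
async fn example_drain<'d>(ring: &mut ReadableRingBuffer<'d, u16>, chunk: &mut [u16]) -> Result<(), Error> {
ring.start();
loop {
// Suspends until `chunk.len()` elements have arrived; an error means the
// DMA lapped the reader (overrun) and the buffered data was lost.
if let Err(e) = ring.read_exact(chunk).await {
ring.clear();
return Err(e);
}
// process `chunk` here
}
}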
impl<'a, W: Word> Drop for ReadableRingBuffer<'a, W> {
fn drop(&mut self) {
self.request_pause();
while self.is_running() {}
// "Subsequent reads and writes cannot be moved ahead of preceding reads."
fence(Ordering::SeqCst);
}
}
/// Ringbuffer for writing data using GPDMA linked-list mode.
pub struct WritableRingBuffer<'a, W: Word> {
channel: Peri<'a, AnyChannel>,
ringbuf: WritableDmaRingBuffer<'a, W>,
table: Table<2>,
options: TransferOptions,
}
impl<'a, W: Word> WritableRingBuffer<'a, W> {
/// Create a new ring buffer.
///
/// Transfer options are applied to the individual linked list items.
pub unsafe fn new(
channel: Peri<'a, impl Channel>,
request: Request,
peri_addr: *mut W,
buffer: &'a mut [W],
options: TransferOptions,
) -> Self {
let channel: Peri<'a, AnyChannel> = channel.into();
let table = Table::<2>::new_ping_pong::<W>(request, peri_addr, buffer, Dir::MemoryToPeripheral);
Self {
channel,
ringbuf: WritableDmaRingBuffer::new(buffer),
table,
options,
}
}
/// Start the ring buffer operation.
pub fn start(&mut self) {
// Apply the default configuration to the channel.
unsafe { self.channel.configure_linked_list(&self.table, self.options) };
self.table.link(RunMode::Circular);
self.channel.start();
}
/// Clear all data in the ring buffer.
pub fn clear(&mut self) {
self.ringbuf.reset(&mut DmaCtrlImpl(self.channel.reborrow()));
}
/// Write elements directly to the raw buffer.
/// This can be used to fill the buffer before starting the DMA transfer.
pub fn write_immediate(&mut self, buf: &[W]) -> Result<(usize, usize), Error> {
self.ringbuf.write_immediate(buf)
}
/// Write elements to the ring buffer.
/// Returns a tuple of the length written and the length remaining in the buffer.
pub fn write(&mut self, buf: &[W]) -> Result<(usize, usize), Error> {
self.ringbuf.write(&mut DmaCtrlImpl(self.channel.reborrow()), buf)
}
/// Write an exact number of elements to the ringbuffer.
pub async fn write_exact(&mut self, buffer: &[W]) -> Result<usize, Error> {
self.ringbuf
.write_exact(&mut DmaCtrlImpl(self.channel.reborrow()), buffer)
.await
}
/// Wait for any ring buffer write error.
pub async fn wait_write_error(&mut self) -> Result<usize, Error> {
self.ringbuf
.wait_write_error(&mut DmaCtrlImpl(self.channel.reborrow()))
.await
}
/// The current length of the ringbuffer
pub fn len(&mut self) -> Result<usize, Error> {
Ok(self.ringbuf.len(&mut DmaCtrlImpl(self.channel.reborrow()))?)
}
/// The capacity of the ringbuffer
pub const fn capacity(&self) -> usize {
self.ringbuf.cap()
}
/// Set a waker to be woken when at least one byte is sent.
pub fn set_waker(&mut self, waker: &Waker) {
DmaCtrlImpl(self.channel.reborrow()).set_waker(waker);
}
/// Request the DMA to suspend.
///
/// To resume the transfer, call [`request_resume`](Self::request_resume).
///
/// This doesn't immediately stop the transfer, you have to wait until [`is_running`](Self::is_running) returns false.
pub fn request_pause(&mut self) {
self.channel.request_pause()
}
/// Request the DMA to resume transfers after being suspended.
pub fn request_resume(&mut self) {
self.channel.request_resume()
}
/// Request the DMA to reset.
///
/// The configuration for this channel will **not be preserved**. If you need to restart the transfer
/// at a later point with the same configuration, see [`request_pause`](Self::request_pause) instead.
pub fn request_reset(&mut self) {
self.channel.request_reset()
}
/// Return whether DMA is still running.
///
/// If this returns `false`, it can be because either the transfer finished, or
/// it was requested to stop early with [`request_pause`](Self::request_pause).
pub fn is_running(&mut self) -> bool {
self.channel.is_running()
}
/// Stop the DMA transfer and await until the buffer is full.
///
/// This disables the DMA transfer's circular mode so that the transfer
/// stops when the buffer is full.
///
/// This is designed to be used with streaming input data such as the
/// I2S/SAI or ADC.
///
/// When using the UART, you probably want `request_pause()`.
pub async fn stop(&mut self) {
// wait until cr.susp reads as true
poll_fn(|cx| {
self.set_waker(cx.waker());
self.channel.poll_stop()
})
.await
}
}
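// Illustrative usage sketch (not part of the original file), using only the API
// defined above: keep a streaming peripheral such as SAI/I2S fed with samples.
// The ring is primed with `write_immediate` before starting, so the DMA never
// reads stale data on its first pass.
async fn example_feed<'d>(ring: &mut WritableRingBuffer<'d, u16>, samples: &[u16]) -> Result<(), Error> {
ring.write_immediate(samples)?;
ring.start();
loop {
// Suspends until there is room for the whole slice; an error indicates the
// DMA caught up with the writer (underrun).
ring.write_exact(samples).await?;
}
}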
impl<'a, W: Word> Drop for WritableRingBuffer<'a, W> {
fn drop(&mut self) {
self.request_pause();
while self.is_running() {}
// "Subsequent reads and writes cannot be moved ahead of preceding reads."
fence(Ordering::SeqCst);
}
}

View File

@ -9,6 +9,8 @@ pub use dma_bdma::*;
#[cfg(gpdma)]
pub(crate) mod gpdma;
#[cfg(gpdma)]
pub use gpdma::ringbuffered::*;
#[cfg(gpdma)]
pub use gpdma::*;
#[cfg(dmamux)]
@ -26,10 +28,13 @@ use embassy_hal_internal::{impl_peripheral, PeripheralType};
use crate::interrupt;
/// The direction of a DMA transfer.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
enum Dir {
pub enum Dir {
/// Transfer from memory to a peripheral.
MemoryToPeripheral,
/// Transfer from a peripheral to memory.
PeripheralToMemory,
}

View File

@ -1,5 +1,3 @@
#![cfg_attr(gpdma, allow(unused))]
use core::future::poll_fn;
use core::task::{Poll, Waker};
@ -285,17 +283,20 @@ impl<'a, W: Word> WritableDmaRingBuffer<'a, W> {
}
/// Write an exact number of elements to the ringbuffer.
///
/// Returns the remaining write capacity in the buffer.
#[allow(dead_code)]
pub async fn write_exact(&mut self, dma: &mut impl DmaCtrl, buffer: &[W]) -> Result<usize, Error> {
let mut written_data = 0;
let mut written_len = 0;
let buffer_len = buffer.len();
poll_fn(|cx| {
dma.set_waker(cx.waker());
match self.write(dma, &buffer[written_data..buffer_len]) {
match self.write(dma, &buffer[written_len..buffer_len]) {
Ok((len, remaining)) => {
written_data += len;
if written_data == buffer_len {
written_len += len;
if written_len == buffer_len {
Poll::Ready(Ok(remaining))
} else {
Poll::Pending

View File

@ -454,7 +454,8 @@ impl<'d, M: Mode, IM: MasterMode> I2c<'d, M, IM> {
// (START has been ACKed or last byte went
// through)
if let Err(err) = self.wait_txis(timeout) {
if send_stop {
if send_stop && err != Error::Nack {
// STOP is sent automatically if a NACK was received
self.master_stop();
}
return Err(err);
@ -548,7 +549,9 @@ impl<'d, M: Mode, IM: MasterMode> I2c<'d, M, IM> {
(idx != last_slice_index) || (slice_len > 255),
timeout,
) {
self.master_stop();
if err != Error::Nack {
self.master_stop();
}
return Err(err);
}
}
@ -561,7 +564,9 @@ impl<'d, M: Mode, IM: MasterMode> I2c<'d, M, IM> {
(number != last_chunk_idx) || (idx != last_slice_index),
timeout,
) {
self.master_stop();
if err != Error::Nack {
self.master_stop();
}
return Err(err);
}
}
@ -571,7 +576,9 @@ impl<'d, M: Mode, IM: MasterMode> I2c<'d, M, IM> {
// (START has been ACKed or last byte went
// through)
if let Err(err) = self.wait_txis(timeout) {
self.master_stop();
if err != Error::Nack {
self.master_stop();
}
return Err(err);
}
@ -1276,7 +1283,7 @@ impl<'d> I2c<'d, Async, MultiMaster> {
} else if isr.stopf() {
self.info.regs.icr().write(|reg| reg.set_stopcf(true));
if remaining_len > 0 {
dma_transfer.request_stop();
dma_transfer.request_pause();
Poll::Ready(Ok(SendStatus::LeftoverBytes(remaining_len as usize)))
} else {
Poll::Ready(Ok(SendStatus::Done))

View File

@ -17,6 +17,7 @@ use crate::rcc::{self, RccPeripheral};
use crate::{peripherals, Peri};
/// QSPI transfer configuration.
#[derive(Clone, Copy)]
pub struct TransferConfig {
/// Instruction width (IMODE)
pub iwidth: QspiWidth,
@ -46,6 +47,7 @@ impl Default for TransferConfig {
}
/// QSPI driver configuration.
#[derive(Clone, Copy)]
pub struct Config {
/// Flash memory size represented as 2^[0-32]; as a reasonable minimum, 1KiB (9) was chosen.
/// If you need a value other than the predefined ones, use the `Other` variant.

View File

@ -1,13 +1,11 @@
//! Serial Audio Interface (SAI)
#![macro_use]
#![cfg_attr(gpdma, allow(unused))]
use core::marker::PhantomData;
use embassy_hal_internal::PeripheralType;
pub use crate::dma::word;
#[cfg(not(gpdma))]
use crate::dma::{ringbuffer, Channel, ReadableRingBuffer, Request, TransferOptions, WritableRingBuffer};
use crate::gpio::{AfType, AnyPin, OutputType, Pull, SealedPin as _, Speed};
use crate::pac::sai::{vals, Sai as Regs};
@ -26,7 +24,6 @@ pub enum Error {
Overrun,
}
#[cfg(not(gpdma))]
impl From<ringbuffer::Error> for Error {
fn from(#[allow(unused)] err: ringbuffer::Error) -> Self {
#[cfg(feature = "defmt")]
@ -652,7 +649,6 @@ impl Config {
}
}
#[cfg(not(gpdma))]
enum RingBuffer<'d, W: word::Word> {
Writable(WritableRingBuffer<'d, W>),
Readable(ReadableRingBuffer<'d, W>),
@ -679,7 +675,6 @@ fn get_af_types(mode: Mode, tx_rx: TxRx) -> (AfType, AfType) {
)
}
#[cfg(not(gpdma))]
fn get_ring_buffer<'d, T: Instance, W: word::Word>(
dma: Peri<'d, impl Channel>,
dma_buf: &'d mut [W],
@ -750,14 +745,10 @@ pub struct Sai<'d, T: Instance, W: word::Word> {
fs: Option<Peri<'d, AnyPin>>,
sck: Option<Peri<'d, AnyPin>>,
mclk: Option<Peri<'d, AnyPin>>,
#[cfg(gpdma)]
ring_buffer: PhantomData<W>,
#[cfg(not(gpdma))]
ring_buffer: RingBuffer<'d, W>,
sub_block: WhichSubBlock,
}
#[cfg(not(gpdma))]
impl<'d, T: Instance, W: word::Word> Sai<'d, T, W> {
/// Create a new SAI driver in asynchronous mode with MCLK.
///

View File

@ -8,9 +8,7 @@ use embassy_sync::waitqueue::AtomicWaker;
use crate::dma::ringbuffer::Error as RingbufferError;
pub use crate::dma::word;
#[cfg(not(gpdma))]
use crate::dma::ReadableRingBuffer;
use crate::dma::{Channel, TransferOptions};
use crate::dma::{Channel, ReadableRingBuffer, TransferOptions};
use crate::gpio::{AfType, AnyPin, Pull, SealedPin as _};
use crate::interrupt::typelevel::Interrupt;
use crate::pac::spdifrx::Spdifrx as Regs;
@ -58,7 +56,6 @@ macro_rules! impl_spdifrx_pin {
/// Ring-buffered SPDIFRX driver.
///
/// Data is read by DMAs and stored in a ring buffer.
#[cfg(not(gpdma))]
pub struct Spdifrx<'d, T: Instance> {
_peri: Peri<'d, T>,
spdifrx_in: Option<Peri<'d, AnyPin>>,
@ -118,7 +115,6 @@ impl Default for Config {
}
}
#[cfg(not(gpdma))]
impl<'d, T: Instance> Spdifrx<'d, T> {
fn dma_opts() -> TransferOptions {
TransferOptions {
@ -236,7 +232,6 @@ impl<'d, T: Instance> Spdifrx<'d, T> {
}
}
#[cfg(not(gpdma))]
impl<'d, T: Instance> Drop for Spdifrx<'d, T> {
fn drop(&mut self) {
T::info().regs.cr().modify(|cr| cr.set_spdifen(0x00));

View File

@ -185,6 +185,16 @@ impl<'d, T: AdvancedInstance4Channel> ComplementaryPwm<'d, T> {
self.inner.set_complementary_output_polarity(channel, polarity);
}
/// Set the main output polarity for a given channel.
pub fn set_main_polarity(&mut self, channel: Channel, polarity: OutputPolarity) {
self.inner.set_output_polarity(channel, polarity);
}
/// Set the complementary output polarity for a given channel.
pub fn set_complementary_polarity(&mut self, channel: Channel, polarity: OutputPolarity) {
self.inner.set_complementary_output_polarity(channel, polarity);
}
/// Set the dead time as a proportion of max_duty
pub fn set_dead_time(&mut self, value: u16) {
let (ckd, value) = compute_dead_time_value(value);

View File

@ -490,14 +490,14 @@ impl<'d, T: Instance> PdPhy<'d, T> {
let sr = r.sr().read();
if sr.rxhrstdet() {
dma.request_stop();
dma.request_pause();
// Clean and re-enable hard reset receive interrupt.
r.icr().write(|w| w.set_rxhrstdetcf(true));
r.imr().modify(|w| w.set_rxhrstdetie(true));
Poll::Ready(Err(RxError::HardReset))
} else if sr.rxmsgend() {
dma.request_stop();
dma.request_pause();
// Should be read immediately on interrupt.
rxpaysz = r.rx_payszr().read().rxpaysz().into();

View File

@ -1965,9 +1965,7 @@ pub use buffered::*;
pub use crate::usart::buffered::InterruptHandler as BufferedInterruptHandler;
mod buffered;
#[cfg(not(gpdma))]
mod ringbuffered;
#[cfg(not(gpdma))]
pub use ringbuffered::RingBufferedUartRx;
#[cfg(any(usart_v1, usart_v2))]

View File

@ -381,7 +381,7 @@ impl ReadReady for RingBufferedUartRx<'_> {
crate::dma::ringbuffer::Error::Overrun => Self::Error::Overrun,
crate::dma::ringbuffer::Error::DmaUnsynced => {
error!(
"Ringbuffer error: DmaUNsynced, driver implementation is
"Ringbuffer error: DmaUNsynced, driver implementation is
probably bugged please open an issue"
);
// we report this as overrun since its recoverable in the same way

View File

@ -912,7 +912,16 @@ impl<'d, T: Instance> driver::EndpointOut for Endpoint<'d, T, Out> {
// Software should ensure that a small delay is included before accessing the SRAM contents. This delay should be
// 800 ns in Full Speed mode and 6.4 μs in Low Speed mode.
#[cfg(stm32h5)]
embassy_time::block_for(embassy_time::Duration::from_nanos(800));
{
#[cfg(feature = "time")]
embassy_time::block_for(embassy_time::Duration::from_nanos(800));
#[cfg(not(feature = "time"))]
{
let freq = unsafe { crate::rcc::get_freqs() }.sys.to_hertz().unwrap().0 as u64;
let cycles = freq * 800 / 1_000_000;
cortex_m::asm::delay(cycles as u32);
}
}
RX_COMPLETE[index].store(false, Ordering::Relaxed);

View File

@ -25,8 +25,8 @@ cortex-m = { version = "0.7.6", features = ["inline-asm", "critical-section-sing
cortex-m-rt = "0.7.0"
panic-probe = "1.0.0"
serde = { version = "1.0.136", default-features = false }
rtos-trace = "0.1.3"
systemview-target = { version = "0.1.2", features = ["callbacks-app", "callbacks-os", "log", "cortex-m"] }
rtos-trace = "0.2"
systemview-target = { version = "0.2", features = ["callbacks-app", "callbacks-os", "log", "cortex-m"] }
log = { version = "0.4.17", optional = true }
[[bin]]

View File

@ -1,11 +1,12 @@
#![no_std]
#![no_main]
use defmt::*;
use defmt::{todo, *};
use embassy_executor::Spawner;
use embassy_nrf::config::HfclkSource;
use embassy_nrf::nfct::{Config as NfcConfig, NfcId, NfcT};
use embassy_nrf::{bind_interrupts, nfct};
use iso14443_4::{Card, IsoDep};
use {defmt_rtt as _, embassy_nrf as _, panic_probe as _};
bind_interrupts!(struct Irqs {
@ -30,12 +31,28 @@ async fn main(_spawner: Spawner) {
let mut buf = [0u8; 256];
let cc = &[
0x00, 0x0f, /* CCLEN: 15-byte capability container */
0x20, /* mapping VERSION 2.0 */
0x00, 0x7f, /* MLe_HI, MLe_LOW: max R-APDU data size */
0x00, 0x7f, /* MLc_HI, MLc_LOW: max C-APDU data size */
/* NDEF File Control TLV: file id E104, max NDEF file size 0x007f, read and write allowed */
0x04, 0x06, 0xe1, 0x04, 0x00, 0x7f, 0x00, 0x00,
];
let ndef = &[
/* NLEN = 16, then one short NDEF record: well-known type 'U' (URI), payload = prefix 0x04 ("https://") + "embassy.dev" */
0x00, 0x10, 0xd1, 0x1, 0xc, 0x55, 0x4, 0x65, 0x6d, 0x62, 0x61, 0x73, 0x73, 0x79, 0x2e, 0x64, 0x65, 0x76,
];
let mut selected: &[u8] = cc;
loop {
info!("activating");
nfc.activate().await;
info!("activated!");
let mut nfc = IsoDep::new(iso14443_3::Logger(&mut nfc));
loop {
info!("rxing");
let n = match nfc.receive(&mut buf).await {
Ok(n) => n,
Err(e) => {
@ -44,25 +61,51 @@ async fn main(_spawner: Spawner) {
}
};
let req = &buf[..n];
info!("received frame {:02x}", req);
info!("iso-dep rx {:02x}", req);
let mut deselect = false;
let resp = match req {
[0xe0, ..] => {
info!("Got RATS, tx'ing ATS");
&[0x06, 0x77, 0x77, 0x81, 0x02, 0x80][..]
let Ok(apdu) = Apdu::parse(req) else {
error!("apdu parse error");
break;
};
info!("apdu: {:?}", apdu);
let resp = match (apdu.cla, apdu.ins, apdu.p1, apdu.p2) {
(0, 0xa4, 4, 0) => {
info!("select app");
&[0x90, 0x00][..]
}
[0xc2] => {
info!("Got deselect!");
deselect = true;
&[0xc2]
(0, 0xa4, 0, 12) => {
info!("select df");
match apdu.data {
[0xe1, 0x03] => {
selected = cc;
&[0x90, 0x00][..]
}
[0xe1, 0x04] => {
selected = ndef;
&[0x90, 0x00][..]
}
_ => todo!(), // return NOT FOUND
}
}
(0, 0xb0, p1, p2) => {
info!("read");
let offs = u16::from_be_bytes([p1 & 0x7f, p2]) as usize;
let len = if apdu.le == 0 { usize::MAX } else { apdu.le as usize };
let n = len.min(selected.len() - offs);
buf[..n].copy_from_slice(&selected[offs..][..n]);
buf[n..][..2].copy_from_slice(&[0x90, 0x00]);
&buf[..n + 2]
}
_ => {
info!("Got unknown command!");
&[0xFF]
&[0xFF, 0xFF]
}
};
info!("iso-dep tx {:02x}", resp);
match nfc.transmit(resp).await {
Ok(()) => {}
Err(e) => {
@ -70,10 +113,211 @@ async fn main(_spawner: Spawner) {
break;
}
}
if deselect {
break;
}
}
}
}
#[derive(Debug, Clone, defmt::Format)]
struct Apdu<'a> {
pub cla: u8,
pub ins: u8,
pub p1: u8,
pub p2: u8,
pub data: &'a [u8],
pub le: u16,
}
#[derive(Debug, Clone, Copy, PartialEq, Eq, defmt::Format)]
struct ApduParseError;
impl<'a> Apdu<'a> {
pub fn parse(apdu: &'a [u8]) -> Result<Self, ApduParseError> {
if apdu.len() < 4 {
return Err(ApduParseError);
}
let (data, le) = match apdu.len() - 4 {
0 => (&[][..], 0),
1 => (&[][..], apdu[4]),
n if n == 1 + apdu[4] as usize && apdu[4] != 0 => (&apdu[5..][..apdu[4] as usize], 0),
n if n == 2 + apdu[4] as usize && apdu[4] != 0 => (&apdu[5..][..apdu[4] as usize], apdu[apdu.len() - 1]),
_ => return Err(ApduParseError),
};
Ok(Apdu {
cla: apdu[0],
ins: apdu[1],
p1: apdu[2],
p2: apdu[3],
data,
le: le as _,
})
}
}
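// Worked examples for the parser above (illustrative ISO 7816-4 short APDUs):
//   00 A4 04 00               -> case 1: data = [],           le = 0
//   00 B0 00 00 0F            -> case 2: data = [],           le = 0x0F
//   00 A4 00 0C 02 E1 04      -> case 3: data = [0xE1, 0x04], le = 0
//   00 D6 00 00 02 AB CD 10   -> case 4: data = [0xAB, 0xCD], le = 0x10
// Anything else (including extended-length APDUs) is rejected with `ApduParseError`.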
mod iso14443_3 {
use core::future::Future;
use defmt::info;
use embassy_nrf::nfct::{Error, NfcT};
pub trait Card {
type Error;
async fn receive(&mut self, buf: &mut [u8]) -> Result<usize, Self::Error>;
async fn transmit(&mut self, buf: &[u8]) -> Result<(), Self::Error>;
}
impl<'a, T: Card> Card for &'a mut T {
type Error = T::Error;
fn receive(&mut self, buf: &mut [u8]) -> impl Future<Output = Result<usize, Self::Error>> {
T::receive(self, buf)
}
fn transmit(&mut self, buf: &[u8]) -> impl Future<Output = Result<(), Self::Error>> {
T::transmit(self, buf)
}
}
impl<'a> Card for NfcT<'a> {
type Error = Error;
fn receive(&mut self, buf: &mut [u8]) -> impl Future<Output = Result<usize, Self::Error>> {
self.receive(buf)
}
fn transmit(&mut self, buf: &[u8]) -> impl Future<Output = Result<(), Self::Error>> {
self.transmit(buf)
}
}
pub struct Logger<T: Card>(pub T);
impl<T: Card> Card for Logger<T> {
type Error = T::Error;
async fn receive(&mut self, buf: &mut [u8]) -> Result<usize, Self::Error> {
let n = T::receive(&mut self.0, buf).await?;
info!("<- {:02x}", &buf[..n]);
Ok(n)
}
fn transmit(&mut self, buf: &[u8]) -> impl Future<Output = Result<(), Self::Error>> {
info!("-> {:02x}", buf);
T::transmit(&mut self.0, buf)
}
}
}
mod iso14443_4 {
use defmt::info;
use crate::iso14443_3;
#[derive(defmt::Format)]
pub enum Error<T> {
Deselected,
Protocol,
Lower(T),
}
pub trait Card {
type Error;
async fn receive(&mut self, buf: &mut [u8]) -> Result<usize, Self::Error>;
async fn transmit(&mut self, buf: &[u8]) -> Result<(), Self::Error>;
}
pub struct IsoDep<T: iso14443_3::Card> {
nfc: T,
/// Block count spin bit: 0 or 1
block_num: u8,
/// true if deselected. This is permanent, you must create another IsoDep
/// instance if we get selected again.
deselected: bool,
/// last response, in case we need to retransmit.
resp: [u8; 256],
resp_len: usize,
}
impl<T: iso14443_3::Card> IsoDep<T> {
pub fn new(nfc: T) -> Self {
Self {
nfc,
block_num: 1,
deselected: false,
resp: [0u8; 256],
resp_len: 0,
}
}
}
impl<T: iso14443_3::Card> Card for IsoDep<T> {
type Error = Error<T::Error>;
async fn receive(&mut self, buf: &mut [u8]) -> Result<usize, Self::Error> {
if self.deselected {
return Err(Error::Deselected);
}
let mut temp = [0u8; 256];
loop {
let n = self.nfc.receive(&mut temp).await.map_err(Error::Lower)?;
assert!(n != 0);
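// ISO 14443-4 block types handled below, identified by the PCB (first byte):
//   0x02/0x03: I-block carrying an APDU; bit 0 is the block number.
//   0xB2/0xB3: R(NAK); answered with R(ACK) or a retransmission.
//   0xE0:      RATS; answered with the hard-coded ATS.
//   0xC2:      S(DESELECT); acknowledged, after which this IsoDep is unusable.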
match temp[0] {
0x02 | 0x03 => {
self.block_num ^= 0x01;
assert!(temp[0] == 0x02 | self.block_num);
buf[..n - 1].copy_from_slice(&temp[1..n]);
return Ok(n - 1);
}
0xb2 | 0xb3 => {
if temp[0] & 0x01 != self.block_num {
info!("Got NAK, transmitting ACK.");
let resp = &[0xA2 | self.block_num];
self.nfc.transmit(resp).await.map_err(Error::Lower)?;
} else {
info!("Got NAK, retransmitting.");
let resp: &[u8] = &self.resp[..self.resp_len];
self.nfc.transmit(resp).await.map_err(Error::Lower)?;
}
}
0xe0 => {
info!("Got RATS, tx'ing ATS");
let resp = &[0x06, 0x77, 0x77, 0x81, 0x02, 0x80];
self.nfc.transmit(resp).await.map_err(Error::Lower)?;
}
0xc2 => {
info!("Got deselect!");
self.deselected = true;
let resp = &[0xC2];
self.nfc.transmit(resp).await.map_err(Error::Lower)?;
return Err(Error::Deselected);
}
_ => {
info!("Got unknown command {:02x}!", temp[0]);
return Err(Error::Protocol);
}
};
}
}
async fn transmit(&mut self, buf: &[u8]) -> Result<(), Self::Error> {
if self.deselected {
return Err(Error::Deselected);
}
self.resp[0] = 0x02 | self.block_num;
self.resp[1..][..buf.len()].copy_from_slice(buf);
self.resp_len = 1 + buf.len();
let resp: &[u8] = &self.resp[..self.resp_len];
self.nfc.transmit(resp).await.map_err(Error::Lower)?;
Ok(())
}
}
}

View File

@ -0,0 +1,52 @@
#![no_std]
#![no_main]
use defmt::info;
use embassy_executor::Spawner;
use embassy_stm32::{sai, Config};
use {defmt_rtt as _, panic_probe as _};
#[embassy_executor::main]
async fn main(_spawner: Spawner) {
info!("Hello world.");
let mut config = Config::default();
{
use embassy_stm32::rcc::*;
config.rcc.pll2 = Some(Pll {
source: PllSource::HSI,
prediv: PllPreDiv::DIV16,
mul: PllMul::MUL32,
divp: Some(PllDiv::DIV16), // 8 MHz SAI clock
divq: None,
divr: None,
});
config.rcc.mux.sai1sel = mux::Saisel::PLL2_P;
}
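// Clock arithmetic (assuming the 64 MHz HSI of this chip family):
// 64 MHz / 16 (prediv) * 32 (mul) = 128 MHz VCO, divided by 16 (divp) = 8 MHz
// on PLL2_P, which the mux above routes to the SAI1 kernel clock.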
let p = embassy_stm32::init(config);
let mut write_buffer = [0u16; 1024];
let (_, sai_b) = sai::split_subblocks(p.SAI1);
let mut sai_b = sai::Sai::new_asynchronous(
sai_b,
p.PF8,
p.PE3,
p.PF9,
p.GPDMA1_CH0,
&mut write_buffer,
Default::default(),
);
// Populate arbitrary data.
let mut data = [0u16; 256];
for (index, sample) in data.iter_mut().enumerate() {
*sample = index as u16;
}
loop {
sai_b.write(&data).await.unwrap();
}
}

View File

@ -11,6 +11,8 @@ embassy-stm32 = { version = "0.4.0", path = "../../embassy-stm32", features = ["
embassy-sync = { version = "0.7.2", path = "../../embassy-sync", features = ["defmt"] }
embassy-executor = { version = "0.9.0", path = "../../embassy-executor", features = ["arch-cortex-m", "executor-thread", "defmt"] }
embassy-time = { version = "0.5.0", path = "../../embassy-time", features = ["defmt", "defmt-timestamp-uptime", "tick-hz-32_768"] }
embassy-usb = { version = "0.5.1", path = "../../embassy-usb", features = ["defmt"] }
embassy-futures = { version = "0.1.2", path = "../../embassy-futures" }
defmt = "1.0.1"
defmt-rtt = "1.0.0"

View File

@ -0,0 +1,95 @@
#![no_std]
#![no_main]
use defmt::{panic, *};
use embassy_executor::Spawner;
use embassy_futures::join::join;
use embassy_stm32::usb::{self, Driver, Instance};
use embassy_stm32::{bind_interrupts, peripherals};
use embassy_usb::class::cdc_acm::{CdcAcmClass, State};
use embassy_usb::driver::EndpointError;
use embassy_usb::Builder;
use {defmt_rtt as _, panic_probe as _};
bind_interrupts!(struct Irqs {
USB => usb::InterruptHandler<peripherals::USB>;
});
#[embassy_executor::main]
async fn main(_spawner: Spawner) {
let mut config = embassy_stm32::Config::default();
{
use embassy_stm32::rcc::*;
config.rcc.hsi = true;
config.rcc.pll = Some(Pll {
source: PllSource::HSI,
mul: PllMul::MUL6, // PLLVCO = 16*6 = 96 MHz
div: PllDiv::DIV3, // 32 MHz clock (16 * 6 / 3)
});
config.rcc.sys = Sysclk::PLL1_R;
}
let p = embassy_stm32::init(config);
info!("Hello World!");
let driver = Driver::new(p.USB, Irqs, p.PA12, p.PA11);
let mut config = embassy_usb::Config::new(0xc0de, 0xcafe);
config.manufacturer = Some("Embassy");
config.product = Some("USB-Serial Example");
config.serial_number = Some("123456");
let mut config_descriptor = [0; 256];
let mut bos_descriptor = [0; 256];
let mut control_buf = [0; 64];
let mut state = State::new();
let mut builder = Builder::new(
driver,
config,
&mut config_descriptor,
&mut bos_descriptor,
&mut [], // no msos descriptors
&mut control_buf,
);
let mut class = CdcAcmClass::new(&mut builder, &mut state, 64);
let mut usb = builder.build();
let usb_fut = usb.run();
let echo_fut = async {
loop {
class.wait_connection().await;
info!("Connected");
let _ = echo(&mut class).await;
info!("Disconnected");
}
};
join(usb_fut, echo_fut).await;
}
struct Disconnected {}
impl From<EndpointError> for Disconnected {
fn from(val: EndpointError) -> Self {
match val {
EndpointError::BufferOverflow => panic!("Buffer overflow"),
EndpointError::Disabled => Disconnected {},
}
}
}
async fn echo<'d, T: Instance + 'd>(class: &mut CdcAcmClass<'d, Driver<'d, T>>) -> Result<(), Disconnected> {
let mut buf = [0; 64];
loop {
let n = class.read_packet(&mut buf).await?;
let data = &buf[..n];
info!("data: {:x}", data);
class.write_packet(data).await?;
}
}