Mirror of https://github.com/embassy-rs/embassy.git (synced 2025-09-27 12:20:37 +00:00)

Merge pull request #3923 from elagil/gpdma_ll_ringbuf_support
Add GPDMA linked-list + ringbuffer support

Commit f6414d8cd2
@@ -25,6 +25,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
- feat: stm32/adc/v3: allow DMA reads to loop through enabled channels
- fix: Fix XSPI not disabling alternate bytes when they were previously enabled
- fix: Fix stm32h7rs init when using external flash via XSPI
- feat: Add GPDMA linked-list + ringbuffer support ([#3923](https://github.com/embassy-rs/embassy/pull/3923))

## 0.3.0 - 2025-08-12
@@ -135,7 +136,7 @@ GPIO:
- Refactor AfType ([#3031](https://github.com/embassy-rs/embassy/pull/3031))
- Gpiov1: Do not call set_speed for AFType::Input ([#2996](https://github.com/embassy-rs/embassy/pull/2996))

UART:
- Add embedded-io impls ([#2739](https://github.com/embassy-rs/embassy/pull/2739))
- Add support for changing baud rate ([#3512](https://github.com/embassy-rs/embassy/pull/3512))
- Add split_ref ([#3500](https://github.com/embassy-rs/embassy/pull/3500))
@@ -159,7 +160,7 @@ UART:
- Wake receive task for each received byte ([#2722](https://github.com/embassy-rs/embassy/pull/2722))
- Fix dma and idle line detection in ringbuffereduartrx ([#3319](https://github.com/embassy-rs/embassy/pull/3319))

SPI:
- Add MISO pullup configuration option ([#2943](https://github.com/embassy-rs/embassy/pull/2943))
- Add slew rate configuration options ([#3669](https://github.com/embassy-rs/embassy/pull/3669))
- Fix blocking_write on nosck spi. ([#3035](https://github.com/embassy-rs/embassy/pull/3035))
@@ -1814,7 +1814,6 @@ impl<'d, T: Instance> Cryp<'d, T, Async> {
        // Configure DMA to transfer input to crypto core.
        let dst_ptr: *mut u32 = T::regs().din().as_ptr();
        let options = TransferOptions {
            #[cfg(not(gpdma))]
            priority: crate::dma::Priority::High,
            ..Default::default()
        };
@@ -1834,7 +1833,6 @@ impl<'d, T: Instance> Cryp<'d, T, Async> {
        // Configure DMA to transfer input to crypto core.
        let dst_ptr: *mut u32 = T::regs().din().as_ptr();
        let options = TransferOptions {
            #[cfg(not(gpdma))]
            priority: crate::dma::Priority::High,
            ..Default::default()
        };
@@ -1853,7 +1851,6 @@ impl<'d, T: Instance> Cryp<'d, T, Async> {
        // Configure DMA to get output from crypto core.
        let src_ptr = T::regs().dout().as_ptr();
        let options = TransferOptions {
            #[cfg(not(gpdma))]
            priority: crate::dma::Priority::VeryHigh,
            ..Default::default()
        };
@@ -498,7 +498,31 @@ impl AnyChannel {
        }
    }

    fn request_stop(&self) {
    fn request_pause(&self) {
        let info = self.info();
        match self.info().dma {
            #[cfg(dma)]
            DmaInfo::Dma(r) => {
                // Disable the channel without overwriting the existing configuration
                r.st(info.num).cr().modify(|w| {
                    w.set_en(false);
                });
            }
            #[cfg(bdma)]
            DmaInfo::Bdma(r) => {
                // Disable the channel without overwriting the existing configuration
                r.ch(info.num).cr().modify(|w| {
                    w.set_en(false);
                });
            }
        }
    }

    fn request_resume(&self) {
        self.start()
    }

    fn request_reset(&self) {
        let info = self.info();
        match self.info().dma {
            #[cfg(dma)]
@@ -518,26 +542,8 @@ impl AnyChannel {
                });
            }
        }
    }

    fn request_pause(&self) {
        let info = self.info();
        match self.info().dma {
            #[cfg(dma)]
            DmaInfo::Dma(r) => {
                // Disable the channel without overwriting the existing configuration
                r.st(info.num).cr().modify(|w| {
                    w.set_en(false);
                });
            }
            #[cfg(bdma)]
            DmaInfo::Bdma(r) => {
                // Disable the channel without overwriting the existing configuration
                r.ch(info.num).cr().modify(|w| {
                    w.set_en(false);
                });
            }
        }
        while self.is_running() {}
    }

    fn is_running(&self) -> bool {
@@ -710,27 +716,31 @@ impl<'a> Transfer<'a> {
        Self { channel }
    }

    /// Request the transfer to stop.
    /// The configuration for this channel will **not be preserved**. If you need to restart the transfer
    /// at a later point with the same configuration, see [`request_pause`](Self::request_pause) instead.
    ///
    /// This doesn't immediately stop the transfer, you have to wait until [`is_running`](Self::is_running) returns false.
    pub fn request_stop(&mut self) {
        self.channel.request_stop()
    }

    /// Request the transfer to pause, keeping the existing configuration for this channel.
    /// To restart the transfer, call [`start`](Self::start) again.
    ///
    /// To resume the transfer, call [`request_resume`](Self::request_resume) again.
    /// This doesn't immediately stop the transfer, you have to wait until [`is_running`](Self::is_running) returns false.
    pub fn request_pause(&mut self) {
        self.channel.request_pause()
    }

    /// Request the transfer to resume after having been paused.
    pub fn request_resume(&mut self) {
        self.channel.request_resume()
    }

    /// Request the DMA to reset.
    ///
    /// The configuration for this channel will **not be preserved**. If you need to restart the transfer
    /// at a later point with the same configuration, see [`request_pause`](Self::request_pause) instead.
    pub fn request_reset(&mut self) {
        self.channel.request_reset()
    }

    /// Return whether this transfer is still running.
    ///
    /// If this returns `false`, it can be because either the transfer finished, or
    /// it was requested to stop early with [`request_stop`](Self::request_stop).
    /// it was requested to stop early with [`request_pause`](Self::request_pause).
    pub fn is_running(&mut self) -> bool {
        self.channel.is_running()
    }
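The hunk above adds pause/resume/reset requests on `Transfer`. A minimal usage sketch, not part of this diff: `transfer` is assumed to be an existing `embassy_stm32::dma::Transfer` created elsewhere (e.g. via `Transfer::new_read`).

    fn park_and_resume(transfer: &mut embassy_stm32::dma::Transfer<'_>) {
        // Suspend the channel; its configuration is preserved.
        transfer.request_pause();
        // Suspension is not immediate, so wait for it to take effect.
        while transfer.is_running() {}
        // ... later, continue the same transfer from where it stopped ...
        transfer.request_resume();
    }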
@@ -754,7 +764,7 @@ impl<'a> Transfer<'a> {

impl<'a> Drop for Transfer<'a> {
    fn drop(&mut self) {
        self.request_stop();
        self.request_reset();
        while self.is_running() {}

        // "Subsequent reads and writes cannot be moved ahead of preceding reads."
@@ -901,15 +911,6 @@ impl<'a, W: Word> ReadableRingBuffer<'a, W> {
        DmaCtrlImpl(self.channel.reborrow()).set_waker(waker);
    }

    /// Request the DMA to stop.
    /// The configuration for this channel will **not be preserved**. If you need to restart the transfer
    /// at a later point with the same configuration, see [`request_pause`](Self::request_pause) instead.
    ///
    /// This doesn't immediately stop the transfer, you have to wait until [`is_running`](Self::is_running) returns false.
    pub fn request_stop(&mut self) {
        self.channel.request_stop()
    }

    /// Request the transfer to pause, keeping the existing configuration for this channel.
    /// To restart the transfer, call [`start`](Self::start) again.
    ///
@@ -918,10 +919,23 @@ impl<'a, W: Word> ReadableRingBuffer<'a, W> {
        self.channel.request_pause()
    }

    /// Request the transfer to resume after having been paused.
    pub fn request_resume(&mut self) {
        self.channel.request_resume()
    }

    /// Request the DMA to reset.
    ///
    /// The configuration for this channel will **not be preserved**. If you need to restart the transfer
    /// at a later point with the same configuration, see [`request_pause`](Self::request_pause) instead.
    pub fn request_reset(&mut self) {
        self.channel.request_reset()
    }

    /// Return whether DMA is still running.
    ///
    /// If this returns `false`, it can be because either the transfer finished, or
    /// it was requested to stop early with [`request_stop`](Self::request_stop).
    /// it was requested to stop early with [`request_reset`](Self::request_reset).
    pub fn is_running(&mut self) -> bool {
        self.channel.is_running()
    }
@@ -934,7 +948,7 @@ impl<'a, W: Word> ReadableRingBuffer<'a, W> {
    /// This is designed to be used with streaming input data such as the
    /// I2S/SAI or ADC.
    ///
    /// When using the UART, you probably want `request_stop()`.
    /// When using the UART, you probably want `request_reset()`.
    pub async fn stop(&mut self) {
        self.channel.disable_circular_mode();
        //wait until cr.susp reads as true
@@ -948,7 +962,7 @@ impl<'a, W: Word> ReadableRingBuffer<'a, W> {

impl<'a, W: Word> Drop for ReadableRingBuffer<'a, W> {
    fn drop(&mut self) {
        self.request_stop();
        self.request_reset();
        while self.is_running() {}

        // "Subsequent reads and writes cannot be moved ahead of preceding reads."
@@ -1058,8 +1072,8 @@ impl<'a, W: Word> WritableRingBuffer<'a, W> {
    /// at a later point with the same configuration, see [`request_pause`](Self::request_pause) instead.
    ///
    /// This doesn't immediately stop the transfer, you have to wait until [`is_running`](Self::is_running) returns false.
    pub fn request_stop(&mut self) {
        self.channel.request_stop()
    pub fn request_reset(&mut self) {
        self.channel.request_reset()
    }

    /// Request the transfer to pause, keeping the existing configuration for this channel.
@@ -1073,7 +1087,7 @@ impl<'a, W: Word> WritableRingBuffer<'a, W> {
    /// Return whether DMA is still running.
    ///
    /// If this returns `false`, it can be because either the transfer finished, or
    /// it was requested to stop early with [`request_stop`](Self::request_stop).
    /// it was requested to stop early with [`request_reset`](Self::request_reset).
    pub fn is_running(&mut self) -> bool {
        self.channel.is_running()
    }
@@ -1098,7 +1112,7 @@ impl<'a, W: Word> WritableRingBuffer<'a, W> {

impl<'a, W: Word> Drop for WritableRingBuffer<'a, W> {
    fn drop(&mut self) {
        self.request_stop();
        self.request_reset();
        while self.is_running() {}

        // "Subsequent reads and writes cannot be moved ahead of preceding reads."
|
@ -1,339 +0,0 @@
|
||||
#![macro_use]
|
||||
|
||||
use core::future::Future;
|
||||
use core::pin::Pin;
|
||||
use core::sync::atomic::{fence, Ordering};
|
||||
use core::task::{Context, Poll};
|
||||
|
||||
use embassy_hal_internal::Peri;
|
||||
use embassy_sync::waitqueue::AtomicWaker;
|
||||
|
||||
use super::word::{Word, WordSize};
|
||||
use super::{AnyChannel, Channel, Dir, Request, STATE};
|
||||
use crate::interrupt::typelevel::Interrupt;
|
||||
use crate::interrupt::Priority;
|
||||
use crate::pac;
|
||||
use crate::pac::gpdma::vals;
|
||||
|
||||
pub(crate) struct ChannelInfo {
|
||||
pub(crate) dma: pac::gpdma::Gpdma,
|
||||
pub(crate) num: usize,
|
||||
#[cfg(feature = "_dual-core")]
|
||||
pub(crate) irq: pac::Interrupt,
|
||||
}
|
||||
|
||||
/// GPDMA transfer options.
|
||||
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
|
||||
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
|
||||
#[non_exhaustive]
|
||||
pub struct TransferOptions {}
|
||||
|
||||
impl Default for TransferOptions {
|
||||
fn default() -> Self {
|
||||
Self {}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<WordSize> for vals::Dw {
|
||||
fn from(raw: WordSize) -> Self {
|
||||
match raw {
|
||||
WordSize::OneByte => Self::BYTE,
|
||||
WordSize::TwoBytes => Self::HALF_WORD,
|
||||
WordSize::FourBytes => Self::WORD,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) struct ChannelState {
|
||||
waker: AtomicWaker,
|
||||
}
|
||||
|
||||
impl ChannelState {
|
||||
pub(crate) const NEW: Self = Self {
|
||||
waker: AtomicWaker::new(),
|
||||
};
|
||||
}
|
||||
|
||||
/// safety: must be called only once
|
||||
pub(crate) unsafe fn init(cs: critical_section::CriticalSection, irq_priority: Priority) {
|
||||
foreach_interrupt! {
|
||||
($peri:ident, gpdma, $block:ident, $signal_name:ident, $irq:ident) => {
|
||||
crate::interrupt::typelevel::$irq::set_priority_with_cs(cs, irq_priority);
|
||||
#[cfg(not(feature = "_dual-core"))]
|
||||
crate::interrupt::typelevel::$irq::enable();
|
||||
};
|
||||
}
|
||||
crate::_generated::init_gpdma();
|
||||
}
|
||||
|
||||
impl AnyChannel {
|
||||
/// Safety: Must be called with a matching set of parameters for a valid dma channel
|
||||
pub(crate) unsafe fn on_irq(&self) {
|
||||
let info = self.info();
|
||||
#[cfg(feature = "_dual-core")]
|
||||
{
|
||||
use embassy_hal_internal::interrupt::InterruptExt as _;
|
||||
info.irq.enable();
|
||||
}
|
||||
|
||||
let state = &STATE[self.id as usize];
|
||||
|
||||
let ch = info.dma.ch(info.num);
|
||||
let sr = ch.sr().read();
|
||||
|
||||
if sr.dtef() {
|
||||
panic!(
|
||||
"DMA: data transfer error on DMA@{:08x} channel {}",
|
||||
info.dma.as_ptr() as u32,
|
||||
info.num
|
||||
);
|
||||
}
|
||||
if sr.usef() {
|
||||
panic!(
|
||||
"DMA: user settings error on DMA@{:08x} channel {}",
|
||||
info.dma.as_ptr() as u32,
|
||||
info.num
|
||||
);
|
||||
}
|
||||
|
||||
if sr.suspf() || sr.tcf() {
|
||||
// disable all xxIEs to prevent the irq from firing again.
|
||||
ch.cr().write(|_| {});
|
||||
|
||||
// Wake the future. It'll look at tcf and see it's set.
|
||||
state.waker.wake();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// DMA transfer.
|
||||
#[must_use = "futures do nothing unless you `.await` or poll them"]
|
||||
pub struct Transfer<'a> {
|
||||
channel: Peri<'a, AnyChannel>,
|
||||
}
|
||||
|
||||
impl<'a> Transfer<'a> {
|
||||
/// Create a new read DMA transfer (peripheral to memory).
|
||||
pub unsafe fn new_read<W: Word>(
|
||||
channel: Peri<'a, impl Channel>,
|
||||
request: Request,
|
||||
peri_addr: *mut W,
|
||||
buf: &'a mut [W],
|
||||
options: TransferOptions,
|
||||
) -> Self {
|
||||
Self::new_read_raw(channel, request, peri_addr, buf, options)
|
||||
}
|
||||
|
||||
/// Create a new read DMA transfer (peripheral to memory), using raw pointers.
|
||||
pub unsafe fn new_read_raw<MW: Word, PW: Word>(
|
||||
channel: Peri<'a, impl Channel>,
|
||||
request: Request,
|
||||
peri_addr: *mut PW,
|
||||
buf: *mut [MW],
|
||||
options: TransferOptions,
|
||||
) -> Self {
|
||||
Self::new_inner(
|
||||
channel.into(),
|
||||
request,
|
||||
Dir::PeripheralToMemory,
|
||||
peri_addr as *const u32,
|
||||
buf as *mut MW as *mut u32,
|
||||
buf.len(),
|
||||
true,
|
||||
PW::size(),
|
||||
MW::size(),
|
||||
options,
|
||||
)
|
||||
}
|
||||
|
||||
/// Create a new write DMA transfer (memory to peripheral).
|
||||
pub unsafe fn new_write<MW: Word, PW: Word>(
|
||||
channel: Peri<'a, impl Channel>,
|
||||
request: Request,
|
||||
buf: &'a [MW],
|
||||
peri_addr: *mut PW,
|
||||
options: TransferOptions,
|
||||
) -> Self {
|
||||
Self::new_write_raw(channel, request, buf, peri_addr, options)
|
||||
}
|
||||
|
||||
/// Create a new write DMA transfer (memory to peripheral), using raw pointers.
|
||||
pub unsafe fn new_write_raw<MW: Word, PW: Word>(
|
||||
channel: Peri<'a, impl Channel>,
|
||||
request: Request,
|
||||
buf: *const [MW],
|
||||
peri_addr: *mut PW,
|
||||
options: TransferOptions,
|
||||
) -> Self {
|
||||
Self::new_inner(
|
||||
channel.into(),
|
||||
request,
|
||||
Dir::MemoryToPeripheral,
|
||||
peri_addr as *const u32,
|
||||
buf as *const MW as *mut u32,
|
||||
buf.len(),
|
||||
true,
|
||||
MW::size(),
|
||||
PW::size(),
|
||||
options,
|
||||
)
|
||||
}
|
||||
|
||||
/// Create a new write DMA transfer (memory to peripheral), writing the same value repeatedly.
|
||||
pub unsafe fn new_write_repeated<MW: Word, PW: Word>(
|
||||
channel: Peri<'a, impl Channel>,
|
||||
request: Request,
|
||||
repeated: &'a MW,
|
||||
count: usize,
|
||||
peri_addr: *mut PW,
|
||||
options: TransferOptions,
|
||||
) -> Self {
|
||||
Self::new_inner(
|
||||
channel.into(),
|
||||
request,
|
||||
Dir::MemoryToPeripheral,
|
||||
peri_addr as *const u32,
|
||||
repeated as *const MW as *mut u32,
|
||||
count,
|
||||
false,
|
||||
MW::size(),
|
||||
PW::size(),
|
||||
options,
|
||||
)
|
||||
}
|
||||
|
||||
unsafe fn new_inner(
|
||||
channel: Peri<'a, AnyChannel>,
|
||||
request: Request,
|
||||
dir: Dir,
|
||||
peri_addr: *const u32,
|
||||
mem_addr: *mut u32,
|
||||
mem_len: usize,
|
||||
incr_mem: bool,
|
||||
data_size: WordSize,
|
||||
dst_size: WordSize,
|
||||
_options: TransferOptions,
|
||||
) -> Self {
|
||||
// BNDT is specified as bytes, not as number of transfers.
|
||||
let Ok(bndt) = (mem_len * data_size.bytes()).try_into() else {
|
||||
panic!("DMA transfers may not be larger than 65535 bytes.");
|
||||
};
|
||||
|
||||
let info = channel.info();
|
||||
let ch = info.dma.ch(info.num);
|
||||
|
||||
// "Preceding reads and writes cannot be moved past subsequent writes."
|
||||
fence(Ordering::SeqCst);
|
||||
|
||||
let this = Self { channel };
|
||||
|
||||
ch.cr().write(|w| w.set_reset(true));
|
||||
ch.fcr().write(|w| w.0 = 0xFFFF_FFFF); // clear all irqs
|
||||
ch.llr().write(|_| {}); // no linked list
|
||||
ch.tr1().write(|w| {
|
||||
w.set_sdw(data_size.into());
|
||||
w.set_ddw(dst_size.into());
|
||||
w.set_sinc(dir == Dir::MemoryToPeripheral && incr_mem);
|
||||
w.set_dinc(dir == Dir::PeripheralToMemory && incr_mem);
|
||||
});
|
||||
ch.tr2().write(|w| {
|
||||
w.set_dreq(match dir {
|
||||
Dir::MemoryToPeripheral => vals::Dreq::DESTINATION_PERIPHERAL,
|
||||
Dir::PeripheralToMemory => vals::Dreq::SOURCE_PERIPHERAL,
|
||||
});
|
||||
w.set_reqsel(request);
|
||||
});
|
||||
ch.tr3().write(|_| {}); // no address offsets.
|
||||
ch.br1().write(|w| w.set_bndt(bndt));
|
||||
|
||||
match dir {
|
||||
Dir::MemoryToPeripheral => {
|
||||
ch.sar().write_value(mem_addr as _);
|
||||
ch.dar().write_value(peri_addr as _);
|
||||
}
|
||||
Dir::PeripheralToMemory => {
|
||||
ch.sar().write_value(peri_addr as _);
|
||||
ch.dar().write_value(mem_addr as _);
|
||||
}
|
||||
}
|
||||
|
||||
ch.cr().write(|w| {
|
||||
// Enable interrupts
|
||||
w.set_tcie(true);
|
||||
w.set_useie(true);
|
||||
w.set_dteie(true);
|
||||
w.set_suspie(true);
|
||||
|
||||
// Start it
|
||||
w.set_en(true);
|
||||
});
|
||||
|
||||
this
|
||||
}
|
||||
|
||||
/// Request the transfer to stop.
|
||||
///
|
||||
/// This doesn't immediately stop the transfer, you have to wait until [`is_running`](Self::is_running) returns false.
|
||||
pub fn request_stop(&mut self) {
|
||||
let info = self.channel.info();
|
||||
let ch = info.dma.ch(info.num);
|
||||
|
||||
ch.cr().modify(|w| w.set_susp(true))
|
||||
}
|
||||
|
||||
/// Return whether this transfer is still running.
|
||||
///
|
||||
/// If this returns `false`, it can be because either the transfer finished, or
|
||||
/// it was requested to stop early with [`request_stop`](Self::request_stop).
|
||||
pub fn is_running(&mut self) -> bool {
|
||||
let info = self.channel.info();
|
||||
let ch = info.dma.ch(info.num);
|
||||
|
||||
let sr = ch.sr().read();
|
||||
!sr.tcf() && !sr.suspf()
|
||||
}
|
||||
|
||||
/// Gets the total remaining transfers for the channel
|
||||
/// Note: this will be zero for transfers that completed without cancellation.
|
||||
pub fn get_remaining_transfers(&self) -> u16 {
|
||||
let info = self.channel.info();
|
||||
let ch = info.dma.ch(info.num);
|
||||
|
||||
ch.br1().read().bndt()
|
||||
}
|
||||
|
||||
/// Blocking wait until the transfer finishes.
|
||||
pub fn blocking_wait(mut self) {
|
||||
while self.is_running() {}
|
||||
|
||||
// "Subsequent reads and writes cannot be moved ahead of preceding reads."
|
||||
fence(Ordering::SeqCst);
|
||||
|
||||
core::mem::forget(self);
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> Drop for Transfer<'a> {
|
||||
fn drop(&mut self) {
|
||||
self.request_stop();
|
||||
while self.is_running() {}
|
||||
|
||||
// "Subsequent reads and writes cannot be moved ahead of preceding reads."
|
||||
fence(Ordering::SeqCst);
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> Unpin for Transfer<'a> {}
|
||||
impl<'a> Future for Transfer<'a> {
|
||||
type Output = ();
|
||||
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
|
||||
let state = &STATE[self.channel.id as usize];
|
||||
state.waker.register(cx.waker());
|
||||
|
||||
if self.is_running() {
|
||||
Poll::Pending
|
||||
} else {
|
||||
Poll::Ready(())
|
||||
}
|
||||
}
|
||||
}
|
embassy-stm32/src/dma/gpdma/linked_list.rs (new file, 267 lines)
@@ -0,0 +1,267 @@
|
||||
//! Implementation of the GPDMA linked list and linked list items.
|
||||
#![macro_use]
|
||||
|
||||
use stm32_metapac::gpdma::regs;
|
||||
use stm32_metapac::gpdma::vals::Dreq;
|
||||
|
||||
use crate::dma::word::{Word, WordSize};
|
||||
use crate::dma::{Dir, Request};
|
||||
|
||||
/// The mode in which to run the linked list.
|
||||
#[derive(Debug)]
|
||||
pub enum RunMode {
|
||||
/// List items are not linked together.
|
||||
Unlinked,
|
||||
/// The list is linked sequentially and only run once.
|
||||
Once,
|
||||
/// The list is linked sequentially, and the end of the list is linked to the beginning.
|
||||
Circular,
|
||||
}
|
||||
|
||||
/// A linked-list item for linear GPDMA transfers.
|
||||
///
|
||||
/// Also works for 2D-capable GPDMA channels, but does not use 2D capabilities.
|
||||
#[derive(Debug, Copy, Clone, Default)]
|
||||
#[repr(C)]
|
||||
pub struct LinearItem {
|
||||
/// Transfer register 1.
|
||||
pub tr1: regs::ChTr1,
|
||||
/// Transfer register 2.
|
||||
pub tr2: regs::ChTr2,
|
||||
/// Block register 1.
|
||||
pub br1: regs::ChBr1,
|
||||
/// Source address register.
|
||||
pub sar: u32,
|
||||
/// Destination address register.
|
||||
pub dar: u32,
|
||||
/// Linked-list address register.
|
||||
pub llr: regs::ChLlr,
|
||||
}
|
||||
|
||||
impl LinearItem {
|
||||
/// Create a new read DMA transfer (peripheral to memory).
|
||||
pub unsafe fn new_read<'d, W: Word>(request: Request, peri_addr: *mut W, buf: &'d mut [W]) -> Self {
|
||||
Self::new_inner(
|
||||
request,
|
||||
Dir::PeripheralToMemory,
|
||||
peri_addr as *const u32,
|
||||
buf as *mut [W] as *mut W as *mut u32,
|
||||
buf.len(),
|
||||
true,
|
||||
W::size(),
|
||||
W::size(),
|
||||
)
|
||||
}
|
||||
|
||||
/// Create a new write DMA transfer (memory to peripheral).
|
||||
pub unsafe fn new_write<'d, MW: Word, PW: Word>(request: Request, buf: &'d [MW], peri_addr: *mut PW) -> Self {
|
||||
Self::new_inner(
|
||||
request,
|
||||
Dir::MemoryToPeripheral,
|
||||
peri_addr as *const u32,
|
||||
buf as *const [MW] as *const MW as *mut u32,
|
||||
buf.len(),
|
||||
true,
|
||||
MW::size(),
|
||||
PW::size(),
|
||||
)
|
||||
}
|
||||
|
||||
unsafe fn new_inner(
|
||||
request: Request,
|
||||
dir: Dir,
|
||||
peri_addr: *const u32,
|
||||
mem_addr: *mut u32,
|
||||
mem_len: usize,
|
||||
incr_mem: bool,
|
||||
data_size: WordSize,
|
||||
dst_size: WordSize,
|
||||
) -> Self {
|
||||
// BNDT is specified as bytes, not as number of transfers.
|
||||
let Ok(bndt) = (mem_len * data_size.bytes()).try_into() else {
|
||||
panic!("DMA transfers may not be larger than 65535 bytes.");
|
||||
};
|
||||
|
||||
let mut br1 = regs::ChBr1(0);
|
||||
br1.set_bndt(bndt);
|
||||
|
||||
let mut tr1 = regs::ChTr1(0);
|
||||
tr1.set_sdw(data_size.into());
|
||||
tr1.set_ddw(dst_size.into());
|
||||
tr1.set_sinc(dir == Dir::MemoryToPeripheral && incr_mem);
|
||||
tr1.set_dinc(dir == Dir::PeripheralToMemory && incr_mem);
|
||||
|
||||
let mut tr2 = regs::ChTr2(0);
|
||||
tr2.set_dreq(match dir {
|
||||
Dir::MemoryToPeripheral => Dreq::DESTINATION_PERIPHERAL,
|
||||
Dir::PeripheralToMemory => Dreq::SOURCE_PERIPHERAL,
|
||||
});
|
||||
tr2.set_reqsel(request);
|
||||
|
||||
let (sar, dar) = match dir {
|
||||
Dir::MemoryToPeripheral => (mem_addr as _, peri_addr as _),
|
||||
Dir::PeripheralToMemory => (peri_addr as _, mem_addr as _),
|
||||
};
|
||||
|
||||
let llr = regs::ChLlr(0);
|
||||
|
||||
Self {
|
||||
tr1,
|
||||
tr2,
|
||||
br1,
|
||||
sar,
|
||||
dar,
|
||||
llr,
|
||||
}
|
||||
}
|
||||
|
||||
/// Link to the next linear item at the given address.
|
||||
///
|
||||
/// Enables channel update bits.
|
||||
fn link_to(&mut self, next: u16) {
|
||||
let mut llr = regs::ChLlr(0);
|
||||
|
||||
llr.set_ut1(true);
|
||||
llr.set_ut2(true);
|
||||
llr.set_ub1(true);
|
||||
llr.set_usa(true);
|
||||
llr.set_uda(true);
|
||||
llr.set_ull(true);
|
||||
|
||||
// Lower two bits are ignored: 32 bit aligned.
|
||||
llr.set_la(next >> 2);
|
||||
|
||||
self.llr = llr;
|
||||
}
|
||||
|
||||
/// Unlink the next linear item.
|
||||
///
|
||||
/// Disables channel update bits.
|
||||
fn unlink(&mut self) {
|
||||
self.llr = regs::ChLlr(0);
|
||||
}
|
||||
|
||||
/// The item's transfer count in number of words.
|
||||
fn transfer_count(&self) -> usize {
|
||||
let word_size: WordSize = self.tr1.ddw().into();
|
||||
self.br1.bndt() as usize / word_size.bytes()
|
||||
}
|
||||
}
|
||||
|
||||
/// A table of linked list items.
|
||||
#[repr(C)]
|
||||
pub struct Table<const ITEM_COUNT: usize> {
|
||||
/// The items.
|
||||
pub items: [LinearItem; ITEM_COUNT],
|
||||
}
|
||||
|
||||
impl<const ITEM_COUNT: usize> Table<ITEM_COUNT> {
|
||||
/// Create a new table.
|
||||
pub fn new(items: [LinearItem; ITEM_COUNT]) -> Self {
|
||||
assert!(!items.is_empty());
|
||||
|
||||
Self { items }
|
||||
}
|
||||
|
||||
/// Create a ping-pong linked-list table.
|
||||
///
|
||||
/// This uses two linked-list items, one for each half of the buffer.
|
||||
pub unsafe fn new_ping_pong<W: Word>(
|
||||
request: Request,
|
||||
peri_addr: *mut W,
|
||||
buffer: &mut [W],
|
||||
direction: Dir,
|
||||
) -> Table<2> {
|
||||
// Buffer halves should be the same length.
|
||||
let half_len = buffer.len() / 2;
|
||||
assert_eq!(half_len * 2, buffer.len());
|
||||
|
||||
let items = match direction {
|
||||
Dir::MemoryToPeripheral => [
|
||||
LinearItem::new_write(request, &mut buffer[..half_len], peri_addr),
|
||||
LinearItem::new_write(request, &mut buffer[half_len..], peri_addr),
|
||||
],
|
||||
Dir::PeripheralToMemory => [
|
||||
LinearItem::new_read(request, peri_addr, &mut buffer[..half_len]),
|
||||
LinearItem::new_read(request, peri_addr, &mut buffer[half_len..]),
|
||||
],
|
||||
};
|
||||
|
||||
Table::new(items)
|
||||
}
|
||||
|
||||
/// Link the table as given by the run mode.
|
||||
pub fn link(&mut self, run_mode: RunMode) {
|
||||
if matches!(run_mode, RunMode::Once | RunMode::Circular) {
|
||||
self.link_sequential();
|
||||
}
|
||||
|
||||
if matches!(run_mode, RunMode::Circular) {
|
||||
self.link_repeat();
|
||||
}
|
||||
}
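As an illustration of how a table might be built and linked, here is a rough sketch that is not part of this file; `REQUEST`, `peri_addr` and `buf` are placeholder names supplied by a peripheral driver.

    // Build two items covering the halves of a buffer, then chain them into a
    // circular list so the DMA ping-pongs between the halves indefinitely.
    let mut table = unsafe { Table::<2>::new_ping_pong(REQUEST, peri_addr, &mut buf, Dir::PeripheralToMemory) };
    table.link(RunMode::Circular);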
|
||||
|
||||
/// The number of linked list items.
|
||||
pub fn len(&self) -> usize {
|
||||
self.items.len()
|
||||
}
|
||||
|
||||
/// The total transfer count of the table in number of words.
|
||||
pub fn transfer_count(&self) -> usize {
|
||||
let mut count = 0;
|
||||
for item in self.items {
|
||||
count += item.transfer_count() as usize
|
||||
}
|
||||
|
||||
count
|
||||
}
|
||||
|
||||
/// Link items of given indices together: first -> second.
|
||||
pub fn link_indices(&mut self, first: usize, second: usize) {
|
||||
assert!(first < self.len());
|
||||
assert!(second < self.len());
|
||||
|
||||
let second_item = self.offset_address(second);
|
||||
self.items[first].link_to(second_item);
|
||||
}
|
||||
|
||||
/// Link items sequentially.
|
||||
pub fn link_sequential(&mut self) {
|
||||
if self.len() > 1 {
|
||||
for index in 0..(self.items.len() - 1) {
|
||||
let next = self.offset_address(index + 1);
|
||||
self.items[index].link_to(next);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Link last to first item.
|
||||
pub fn link_repeat(&mut self) {
|
||||
let first_address = self.offset_address(0);
|
||||
self.items.last_mut().unwrap().link_to(first_address);
|
||||
}
|
||||
|
||||
/// Unlink all items.
|
||||
pub fn unlink(&mut self) {
|
||||
for item in self.items.iter_mut() {
|
||||
item.unlink();
|
||||
}
|
||||
}
|
||||
|
||||
/// Linked list base address (upper 16 address bits).
|
||||
pub fn base_address(&self) -> u16 {
|
||||
((&raw const self.items as u32) >> 16) as _
|
||||
}
|
||||
|
||||
/// Linked list offset address (lower 16 address bits) at the selected index.
|
||||
pub fn offset_address(&self, index: usize) -> u16 {
|
||||
assert!(self.items.len() > index);
|
||||
|
||||
let address = &raw const self.items[index] as _;
|
||||
|
||||
// Ensure 32 bit address alignment.
|
||||
assert_eq!(address & 0b11, 0);
|
||||
|
||||
address
|
||||
}
|
||||
}
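In other words, each item address is split between the channel's LBAR register (upper half, from base_address) and the LLR.LA field (lower half, from offset_address, with the two always-zero alignment bits dropped by the `>> 2` in link_to). A hedged worked example for a hypothetical item at address 0x2000_1234:

    let addr: u32 = 0x2000_1234;
    let base = (addr >> 16) as u16; // 0x2000 -> written to LBAR
    let offset = addr as u16;       // 0x1234 -> lower 16 bits of the item address
    let la_field = offset >> 2;     // 0x048D -> value placed in LLR.LA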
|
embassy-stm32/src/dma/gpdma/mod.rs (new file, 699 lines)
@@ -0,0 +1,699 @@
|
||||
#![macro_use]
|
||||
|
||||
use core::future::Future;
|
||||
use core::pin::Pin;
|
||||
use core::sync::atomic::{fence, AtomicUsize, Ordering};
|
||||
use core::task::{Context, Poll};
|
||||
|
||||
use embassy_hal_internal::Peri;
|
||||
use embassy_sync::waitqueue::AtomicWaker;
|
||||
use linked_list::Table;
|
||||
|
||||
use super::word::{Word, WordSize};
|
||||
use super::{AnyChannel, Channel, Dir, Request, STATE};
|
||||
use crate::interrupt::typelevel::Interrupt;
|
||||
use crate::pac;
|
||||
use crate::pac::gpdma::vals;
|
||||
|
||||
pub mod linked_list;
|
||||
pub mod ringbuffered;
|
||||
|
||||
pub(crate) struct ChannelInfo {
|
||||
pub(crate) dma: pac::gpdma::Gpdma,
|
||||
pub(crate) num: usize,
|
||||
#[cfg(feature = "_dual-core")]
|
||||
pub(crate) irq: pac::Interrupt,
|
||||
}
|
||||
|
||||
/// DMA request priority
|
||||
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
|
||||
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
|
||||
pub enum Priority {
|
||||
/// Low Priority
|
||||
Low,
|
||||
/// Medium Priority
|
||||
Medium,
|
||||
/// High Priority
|
||||
High,
|
||||
/// Very High Priority
|
||||
VeryHigh,
|
||||
}
|
||||
|
||||
impl From<Priority> for pac::gpdma::vals::Prio {
|
||||
fn from(value: Priority) -> Self {
|
||||
match value {
|
||||
Priority::Low => pac::gpdma::vals::Prio::LOW_WITH_LOWH_WEIGHT,
|
||||
Priority::Medium => pac::gpdma::vals::Prio::LOW_WITH_MID_WEIGHT,
|
||||
Priority::High => pac::gpdma::vals::Prio::LOW_WITH_HIGH_WEIGHT,
|
||||
Priority::VeryHigh => pac::gpdma::vals::Prio::HIGH,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// GPDMA transfer options.
|
||||
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
|
||||
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
|
||||
#[non_exhaustive]
|
||||
pub struct TransferOptions {
|
||||
/// Request priority level.
|
||||
pub priority: Priority,
|
||||
/// Enable half transfer interrupt.
|
||||
pub half_transfer_ir: bool,
|
||||
/// Enable transfer complete interrupt.
|
||||
pub complete_transfer_ir: bool,
|
||||
}
|
||||
|
||||
impl Default for TransferOptions {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
priority: Priority::VeryHigh,
|
||||
half_transfer_ir: false,
|
||||
complete_transfer_ir: true,
|
||||
}
|
||||
}
|
||||
}
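Since the struct is `#[non_exhaustive]`, user code outside the crate would typically start from the default and override individual fields. A small sketch, not part of this file:

    // Enable the half-transfer interrupt in addition to the defaults
    // (VeryHigh priority, transfer-complete interrupt enabled).
    let mut options = TransferOptions::default();
    options.half_transfer_ir = true;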
|
||||
|
||||
impl From<WordSize> for vals::Dw {
|
||||
fn from(raw: WordSize) -> Self {
|
||||
match raw {
|
||||
WordSize::OneByte => Self::BYTE,
|
||||
WordSize::TwoBytes => Self::HALF_WORD,
|
||||
WordSize::FourBytes => Self::WORD,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<vals::Dw> for WordSize {
|
||||
fn from(raw: vals::Dw) -> Self {
|
||||
match raw {
|
||||
vals::Dw::BYTE => Self::OneByte,
|
||||
vals::Dw::HALF_WORD => Self::TwoBytes,
|
||||
vals::Dw::WORD => Self::FourBytes,
|
||||
_ => panic!("Invalid word size"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) struct LLiState {
|
||||
/// The number of linked-list items.
|
||||
count: AtomicUsize,
|
||||
/// The index of the current linked-list item.
|
||||
index: AtomicUsize,
|
||||
/// The total transfer count of all linked-list items in number of words.
|
||||
transfer_count: AtomicUsize,
|
||||
}
|
||||
|
||||
pub(crate) struct ChannelState {
|
||||
waker: AtomicWaker,
|
||||
complete_count: AtomicUsize,
|
||||
lli_state: LLiState,
|
||||
}
|
||||
|
||||
impl ChannelState {
|
||||
pub(crate) const NEW: Self = Self {
|
||||
waker: AtomicWaker::new(),
|
||||
complete_count: AtomicUsize::new(0),
|
||||
|
||||
lli_state: LLiState {
|
||||
count: AtomicUsize::new(0),
|
||||
index: AtomicUsize::new(0),
|
||||
transfer_count: AtomicUsize::new(0),
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
/// safety: must be called only once
|
||||
pub(crate) unsafe fn init(cs: critical_section::CriticalSection, irq_priority: crate::interrupt::Priority) {
|
||||
foreach_interrupt! {
|
||||
($peri:ident, gpdma, $block:ident, $signal_name:ident, $irq:ident) => {
|
||||
crate::interrupt::typelevel::$irq::set_priority_with_cs(cs, irq_priority);
|
||||
#[cfg(not(feature = "_dual-core"))]
|
||||
crate::interrupt::typelevel::$irq::enable();
|
||||
};
|
||||
}
|
||||
crate::_generated::init_gpdma();
|
||||
}
|
||||
|
||||
impl AnyChannel {
|
||||
/// Safety: Must be called with a matching set of parameters for a valid dma channel
|
||||
pub(crate) unsafe fn on_irq(&self) {
|
||||
let info = self.info();
|
||||
#[cfg(feature = "_dual-core")]
|
||||
{
|
||||
use embassy_hal_internal::interrupt::InterruptExt as _;
|
||||
info.irq.enable();
|
||||
}
|
||||
|
||||
let state = &STATE[self.id as usize];
|
||||
|
||||
let ch = info.dma.ch(info.num);
|
||||
let sr = ch.sr().read();
|
||||
|
||||
if sr.dtef() {
|
||||
panic!(
|
||||
"DMA: data transfer error on DMA@{:08x} channel {}",
|
||||
info.dma.as_ptr() as u32,
|
||||
info.num
|
||||
);
|
||||
}
|
||||
if sr.usef() {
|
||||
panic!(
|
||||
"DMA: user settings error on DMA@{:08x} channel {}",
|
||||
info.dma.as_ptr() as u32,
|
||||
info.num
|
||||
);
|
||||
}
|
||||
if sr.ulef() {
|
||||
panic!(
|
||||
"DMA: link transfer error on DMA@{:08x} channel {}",
|
||||
info.dma.as_ptr() as u32,
|
||||
info.num
|
||||
);
|
||||
}
|
||||
|
||||
if sr.htf() {
|
||||
ch.fcr().write(|w| w.set_htf(true));
|
||||
}
|
||||
|
||||
if sr.tcf() {
|
||||
ch.fcr().write(|w| w.set_tcf(true));
|
||||
|
||||
let lli_count = state.lli_state.count.load(Ordering::Acquire);
|
||||
let complete = if lli_count > 0 {
|
||||
let next_lli_index = state.lli_state.index.load(Ordering::Acquire) + 1;
|
||||
let complete = next_lli_index >= lli_count;
|
||||
|
||||
state
|
||||
.lli_state
|
||||
.index
|
||||
.store(if complete { 0 } else { next_lli_index }, Ordering::Release);
|
||||
|
||||
complete
|
||||
} else {
|
||||
true
|
||||
};
|
||||
|
||||
if complete {
|
||||
state.complete_count.fetch_add(1, Ordering::Release);
|
||||
}
|
||||
}
|
||||
|
||||
if sr.suspf() {
|
||||
// Disable all xxIEs to prevent the irq from firing again.
|
||||
ch.cr().write(|_| {});
|
||||
}
|
||||
state.waker.wake();
|
||||
}
|
||||
|
||||
fn get_remaining_transfers(&self) -> u16 {
|
||||
let info = self.info();
|
||||
let ch = info.dma.ch(info.num);
|
||||
let word_size: WordSize = ch.tr1().read().ddw().into();
|
||||
|
||||
ch.br1().read().bndt() / word_size.bytes() as u16
|
||||
}
|
||||
|
||||
unsafe fn configure(
|
||||
&self,
|
||||
request: Request,
|
||||
dir: Dir,
|
||||
peri_addr: *const u32,
|
||||
mem_addr: *mut u32,
|
||||
mem_len: usize,
|
||||
incr_mem: bool,
|
||||
data_size: WordSize,
|
||||
dst_size: WordSize,
|
||||
options: TransferOptions,
|
||||
) {
|
||||
// BNDT is specified as bytes, not as number of transfers.
|
||||
let Ok(bndt) = (mem_len * data_size.bytes()).try_into() else {
|
||||
panic!("DMA transfers may not be larger than 65535 bytes.");
|
||||
};
|
||||
|
||||
let info = self.info();
|
||||
let ch = info.dma.ch(info.num);
|
||||
|
||||
// "Preceding reads and writes cannot be moved past subsequent writes."
|
||||
fence(Ordering::SeqCst);
|
||||
|
||||
ch.cr().write(|w| w.set_reset(true));
|
||||
ch.fcr().write(|w| {
|
||||
// Clear all irqs
|
||||
w.set_dtef(true);
|
||||
w.set_htf(true);
|
||||
w.set_suspf(true);
|
||||
w.set_tcf(true);
|
||||
w.set_tof(true);
|
||||
w.set_ulef(true);
|
||||
w.set_usef(true);
|
||||
});
|
||||
ch.llr().write(|_| {}); // no linked list
|
||||
ch.tr1().write(|w| {
|
||||
w.set_sdw(data_size.into());
|
||||
w.set_ddw(dst_size.into());
|
||||
w.set_sinc(dir == Dir::MemoryToPeripheral && incr_mem);
|
||||
w.set_dinc(dir == Dir::PeripheralToMemory && incr_mem);
|
||||
});
|
||||
ch.tr2().write(|w| {
|
||||
w.set_dreq(match dir {
|
||||
Dir::MemoryToPeripheral => vals::Dreq::DESTINATION_PERIPHERAL,
|
||||
Dir::PeripheralToMemory => vals::Dreq::SOURCE_PERIPHERAL,
|
||||
});
|
||||
w.set_reqsel(request);
|
||||
});
|
||||
ch.tr3().write(|_| {}); // no address offsets.
|
||||
ch.br1().write(|w| w.set_bndt(bndt));
|
||||
|
||||
match dir {
|
||||
Dir::MemoryToPeripheral => {
|
||||
ch.sar().write_value(mem_addr as _);
|
||||
ch.dar().write_value(peri_addr as _);
|
||||
}
|
||||
Dir::PeripheralToMemory => {
|
||||
ch.sar().write_value(peri_addr as _);
|
||||
ch.dar().write_value(mem_addr as _);
|
||||
}
|
||||
}
|
||||
|
||||
ch.cr().write(|w| {
|
||||
w.set_prio(options.priority.into());
|
||||
w.set_htie(options.half_transfer_ir);
|
||||
w.set_tcie(options.complete_transfer_ir);
|
||||
w.set_useie(true);
|
||||
w.set_dteie(true);
|
||||
w.set_suspie(true);
|
||||
});
|
||||
|
||||
let state = &STATE[self.id as usize];
|
||||
state.lli_state.count.store(0, Ordering::Relaxed);
|
||||
state.lli_state.index.store(0, Ordering::Relaxed);
|
||||
state.lli_state.transfer_count.store(0, Ordering::Relaxed)
|
||||
}
|
||||
|
||||
/// Configure a linked-list transfer.
|
||||
unsafe fn configure_linked_list<const ITEM_COUNT: usize>(
|
||||
&self,
|
||||
table: &Table<ITEM_COUNT>,
|
||||
options: TransferOptions,
|
||||
) {
|
||||
let info = self.info();
|
||||
let ch = info.dma.ch(info.num);
|
||||
|
||||
// "Preceding reads and writes cannot be moved past subsequent writes."
|
||||
fence(Ordering::SeqCst);
|
||||
|
||||
ch.cr().write(|w| w.set_reset(true));
|
||||
ch.fcr().write(|w| {
|
||||
// Clear all irqs
|
||||
w.set_dtef(true);
|
||||
w.set_htf(true);
|
||||
w.set_suspf(true);
|
||||
w.set_tcf(true);
|
||||
w.set_tof(true);
|
||||
w.set_ulef(true);
|
||||
w.set_usef(true);
|
||||
});
|
||||
ch.lbar().write(|reg| reg.set_lba(table.base_address()));
|
||||
|
||||
// Empty LLI0.
|
||||
ch.br1().write(|w| w.set_bndt(0));
|
||||
|
||||
// Enable all linked-list field updates.
|
||||
ch.llr().write(|w| {
|
||||
w.set_ut1(true);
|
||||
w.set_ut2(true);
|
||||
w.set_ub1(true);
|
||||
w.set_usa(true);
|
||||
w.set_uda(true);
|
||||
w.set_ull(true);
|
||||
|
||||
// Lower two bits are ignored: 32 bit aligned.
|
||||
w.set_la(table.offset_address(0) >> 2);
|
||||
});
|
||||
|
||||
ch.tr3().write(|_| {}); // no address offsets.
|
||||
|
||||
ch.cr().write(|w| {
|
||||
w.set_prio(options.priority.into());
|
||||
w.set_htie(options.half_transfer_ir);
|
||||
w.set_tcie(options.complete_transfer_ir);
|
||||
w.set_useie(true);
|
||||
w.set_uleie(true);
|
||||
w.set_dteie(true);
|
||||
w.set_suspie(true);
|
||||
});
|
||||
|
||||
let state = &STATE[self.id as usize];
|
||||
state.lli_state.count.store(ITEM_COUNT, Ordering::Relaxed);
|
||||
state.lli_state.index.store(0, Ordering::Relaxed);
|
||||
state
|
||||
.lli_state
|
||||
.transfer_count
|
||||
.store(table.transfer_count(), Ordering::Relaxed)
|
||||
}
|
||||
|
||||
fn start(&self) {
|
||||
let info = self.info();
|
||||
let ch = info.dma.ch(info.num);
|
||||
|
||||
ch.cr().modify(|w| w.set_en(true));
|
||||
}
|
||||
|
||||
fn request_pause(&self) {
|
||||
let info = self.info();
|
||||
let ch = info.dma.ch(info.num);
|
||||
|
||||
ch.cr().modify(|w| w.set_susp(true))
|
||||
}
|
||||
|
||||
fn request_resume(&self) {
|
||||
let info = self.info();
|
||||
let ch = info.dma.ch(info.num);
|
||||
|
||||
ch.cr().modify(|w| w.set_susp(false));
|
||||
}
|
||||
|
||||
fn request_reset(&self) {
|
||||
let info = self.info();
|
||||
let ch = info.dma.ch(info.num);
|
||||
|
||||
self.request_pause();
|
||||
while self.is_running() {}
|
||||
|
||||
ch.cr().modify(|w| w.set_reset(true));
|
||||
}
|
||||
|
||||
fn is_running(&self) -> bool {
|
||||
let info = self.info();
|
||||
let ch = info.dma.ch(info.num);
|
||||
|
||||
let sr = ch.sr().read();
|
||||
|
||||
!sr.suspf() && !sr.idlef()
|
||||
}
|
||||
|
||||
fn poll_stop(&self) -> Poll<()> {
|
||||
use core::sync::atomic::compiler_fence;
|
||||
compiler_fence(Ordering::SeqCst);
|
||||
|
||||
if !self.is_running() {
|
||||
Poll::Ready(())
|
||||
} else {
|
||||
Poll::Pending
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Linked-list DMA transfer.
|
||||
#[must_use = "futures do nothing unless you `.await` or poll them"]
|
||||
pub struct LinkedListTransfer<'a, const ITEM_COUNT: usize> {
|
||||
channel: Peri<'a, AnyChannel>,
|
||||
}
|
||||
|
||||
impl<'a, const ITEM_COUNT: usize> LinkedListTransfer<'a, ITEM_COUNT> {
|
||||
/// Create a new linked-list transfer.
|
||||
pub unsafe fn new_linked_list<const N: usize>(
|
||||
channel: Peri<'a, impl Channel>,
|
||||
table: Table<ITEM_COUNT>,
|
||||
options: TransferOptions,
|
||||
) -> Self {
|
||||
Self::new_inner_linked_list(channel.into(), table, options)
|
||||
}
|
||||
|
||||
unsafe fn new_inner_linked_list(
|
||||
channel: Peri<'a, AnyChannel>,
|
||||
table: Table<ITEM_COUNT>,
|
||||
options: TransferOptions,
|
||||
) -> Self {
|
||||
channel.configure_linked_list(&table, options);
|
||||
channel.start();
|
||||
|
||||
Self { channel }
|
||||
}
|
||||
|
||||
/// Request the transfer to pause, keeping the existing configuration for this channel.
|
||||
///
|
||||
/// To resume the transfer, call [`request_resume`](Self::request_resume) again.
|
||||
/// This doesn't immediately stop the transfer, you have to wait until [`is_running`](Self::is_running) returns false.
|
||||
pub fn request_pause(&mut self) {
|
||||
self.channel.request_pause()
|
||||
}
|
||||
|
||||
/// Request the transfer to resume after having been paused.
|
||||
pub fn request_resume(&mut self) {
|
||||
self.channel.request_resume()
|
||||
}
|
||||
|
||||
/// Request the DMA to reset.
|
||||
///
|
||||
/// The configuration for this channel will **not be preserved**. If you need to restart the transfer
|
||||
/// at a later point with the same configuration, see [`request_pause`](Self::request_pause) instead.
|
||||
pub fn request_reset(&mut self) {
|
||||
self.channel.request_reset()
|
||||
}
|
||||
|
||||
/// Return whether this transfer is still running.
|
||||
///
|
||||
/// If this returns `false`, it can be because either the transfer finished, or
|
||||
/// it was requested to stop early with [`request_pause`](Self::request_pause).
|
||||
pub fn is_running(&mut self) -> bool {
|
||||
self.channel.is_running()
|
||||
}
|
||||
|
||||
/// Gets the total remaining transfers for the channel
|
||||
/// Note: this will be zero for transfers that completed without cancellation.
|
||||
pub fn get_remaining_transfers(&self) -> u16 {
|
||||
self.channel.get_remaining_transfers()
|
||||
}
|
||||
|
||||
/// Blocking wait until the transfer finishes.
|
||||
pub fn blocking_wait(mut self) {
|
||||
while self.is_running() {}
|
||||
|
||||
// "Subsequent reads and writes cannot be moved ahead of preceding reads."
|
||||
fence(Ordering::SeqCst);
|
||||
|
||||
core::mem::forget(self);
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, const ITEM_COUNT: usize> Drop for LinkedListTransfer<'a, ITEM_COUNT> {
|
||||
fn drop(&mut self) {
|
||||
self.request_reset();
|
||||
|
||||
// "Subsequent reads and writes cannot be moved ahead of preceding reads."
|
||||
fence(Ordering::SeqCst);
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, const ITEM_COUNT: usize> Unpin for LinkedListTransfer<'a, ITEM_COUNT> {}
|
||||
impl<'a, const ITEM_COUNT: usize> Future for LinkedListTransfer<'a, ITEM_COUNT> {
|
||||
type Output = ();
|
||||
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
|
||||
let state = &STATE[self.channel.id as usize];
|
||||
state.waker.register(cx.waker());
|
||||
|
||||
if self.is_running() {
|
||||
Poll::Pending
|
||||
} else {
|
||||
Poll::Ready(())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// DMA transfer.
|
||||
#[must_use = "futures do nothing unless you `.await` or poll them"]
|
||||
pub struct Transfer<'a> {
|
||||
channel: Peri<'a, AnyChannel>,
|
||||
}
|
||||
|
||||
impl<'a> Transfer<'a> {
|
||||
/// Create a new read DMA transfer (peripheral to memory).
|
||||
pub unsafe fn new_read<W: Word>(
|
||||
channel: Peri<'a, impl Channel>,
|
||||
request: Request,
|
||||
peri_addr: *mut W,
|
||||
buf: &'a mut [W],
|
||||
options: TransferOptions,
|
||||
) -> Self {
|
||||
Self::new_read_raw(channel, request, peri_addr, buf, options)
|
||||
}
|
||||
|
||||
/// Create a new read DMA transfer (peripheral to memory), using raw pointers.
|
||||
pub unsafe fn new_read_raw<MW: Word, PW: Word>(
|
||||
channel: Peri<'a, impl Channel>,
|
||||
request: Request,
|
||||
peri_addr: *mut PW,
|
||||
buf: *mut [MW],
|
||||
options: TransferOptions,
|
||||
) -> Self {
|
||||
Self::new_inner(
|
||||
channel.into(),
|
||||
request,
|
||||
Dir::PeripheralToMemory,
|
||||
peri_addr as *const u32,
|
||||
buf as *mut MW as *mut u32,
|
||||
buf.len(),
|
||||
true,
|
||||
PW::size(),
|
||||
MW::size(),
|
||||
options,
|
||||
)
|
||||
}
|
||||
|
||||
/// Create a new write DMA transfer (memory to peripheral).
|
||||
pub unsafe fn new_write<MW: Word, PW: Word>(
|
||||
channel: Peri<'a, impl Channel>,
|
||||
request: Request,
|
||||
buf: &'a [MW],
|
||||
peri_addr: *mut PW,
|
||||
options: TransferOptions,
|
||||
) -> Self {
|
||||
Self::new_write_raw(channel, request, buf, peri_addr, options)
|
||||
}
|
||||
|
||||
/// Create a new write DMA transfer (memory to peripheral), using raw pointers.
|
||||
pub unsafe fn new_write_raw<MW: Word, PW: Word>(
|
||||
channel: Peri<'a, impl Channel>,
|
||||
request: Request,
|
||||
buf: *const [MW],
|
||||
peri_addr: *mut PW,
|
||||
options: TransferOptions,
|
||||
) -> Self {
|
||||
Self::new_inner(
|
||||
channel.into(),
|
||||
request,
|
||||
Dir::MemoryToPeripheral,
|
||||
peri_addr as *const u32,
|
||||
buf as *const MW as *mut u32,
|
||||
buf.len(),
|
||||
true,
|
||||
MW::size(),
|
||||
PW::size(),
|
||||
options,
|
||||
)
|
||||
}
|
||||
|
||||
/// Create a new write DMA transfer (memory to peripheral), writing the same value repeatedly.
|
||||
pub unsafe fn new_write_repeated<MW: Word, PW: Word>(
|
||||
channel: Peri<'a, impl Channel>,
|
||||
request: Request,
|
||||
repeated: &'a MW,
|
||||
count: usize,
|
||||
peri_addr: *mut PW,
|
||||
options: TransferOptions,
|
||||
) -> Self {
|
||||
Self::new_inner(
|
||||
channel.into(),
|
||||
request,
|
||||
Dir::MemoryToPeripheral,
|
||||
peri_addr as *const u32,
|
||||
repeated as *const MW as *mut u32,
|
||||
count,
|
||||
false,
|
||||
MW::size(),
|
||||
PW::size(),
|
||||
options,
|
||||
)
|
||||
}
|
||||
|
||||
unsafe fn new_inner(
|
||||
channel: Peri<'a, AnyChannel>,
|
||||
request: Request,
|
||||
dir: Dir,
|
||||
peri_addr: *const u32,
|
||||
mem_addr: *mut u32,
|
||||
mem_len: usize,
|
||||
incr_mem: bool,
|
||||
data_size: WordSize,
|
||||
peripheral_size: WordSize,
|
||||
options: TransferOptions,
|
||||
) -> Self {
|
||||
assert!(mem_len > 0 && mem_len <= 0xFFFF);
|
||||
|
||||
channel.configure(
|
||||
request,
|
||||
dir,
|
||||
peri_addr,
|
||||
mem_addr,
|
||||
mem_len,
|
||||
incr_mem,
|
||||
data_size,
|
||||
peripheral_size,
|
||||
options,
|
||||
);
|
||||
channel.start();
|
||||
|
||||
Self { channel }
|
||||
}
|
||||
|
||||
/// Request the transfer to pause, keeping the existing configuration for this channel.
|
||||
/// To restart the transfer, call [`start`](Self::start) again.
|
||||
///
|
||||
/// This doesn't immediately stop the transfer, you have to wait until [`is_running`](Self::is_running) returns false.
|
||||
pub fn request_pause(&mut self) {
|
||||
self.channel.request_pause()
|
||||
}
|
||||
|
||||
/// Request the transfer to resume after being suspended.
|
||||
pub fn request_resume(&mut self) {
|
||||
self.channel.request_resume()
|
||||
}
|
||||
|
||||
/// Request the DMA to reset.
|
||||
///
|
||||
/// The configuration for this channel will **not be preserved**. If you need to restart the transfer
|
||||
/// at a later point with the same configuration, see [`request_pause`](Self::request_pause) instead.
|
||||
pub fn request_reset(&mut self) {
|
||||
self.channel.request_reset()
|
||||
}
|
||||
|
||||
/// Return whether this transfer is still running.
|
||||
///
|
||||
/// If this returns `false`, it can be because either the transfer finished, or
|
||||
/// it was requested to stop early with [`request_pause`](Self::request_pause).
|
||||
pub fn is_running(&mut self) -> bool {
|
||||
self.channel.is_running()
|
||||
}
|
||||
|
||||
/// Gets the total remaining transfers for the channel
|
||||
/// Note: this will be zero for transfers that completed without cancellation.
|
||||
pub fn get_remaining_transfers(&self) -> u16 {
|
||||
self.channel.get_remaining_transfers()
|
||||
}
|
||||
|
||||
/// Blocking wait until the transfer finishes.
|
||||
pub fn blocking_wait(mut self) {
|
||||
while self.is_running() {}
|
||||
|
||||
// "Subsequent reads and writes cannot be moved ahead of preceding reads."
|
||||
fence(Ordering::SeqCst);
|
||||
|
||||
core::mem::forget(self);
|
||||
}
|
||||
}
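A rough usage sketch for this transfer type, not part of this file; `dma_ch`, `request` and `periph_dr` are placeholders supplied by a peripheral driver, and the code is assumed to run in an async context:

    // Write a buffer to a peripheral data register and await completion.
    let buf = [0u8; 16];
    let transfer = unsafe { Transfer::new_write(dma_ch, request, &buf, periph_dr as *mut u8, TransferOptions::default()) };
    transfer.await; // resolves once the transfer-complete flag is seen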
|
||||
|
||||
impl<'a> Drop for Transfer<'a> {
|
||||
fn drop(&mut self) {
|
||||
self.request_pause();
|
||||
while self.is_running() {}
|
||||
|
||||
// "Subsequent reads and writes cannot be moved ahead of preceding reads."
|
||||
fence(Ordering::SeqCst);
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> Unpin for Transfer<'a> {}
|
||||
impl<'a> Future for Transfer<'a> {
|
||||
type Output = ();
|
||||
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
|
||||
let state = &STATE[self.channel.id as usize];
|
||||
state.waker.register(cx.waker());
|
||||
|
||||
if self.is_running() {
|
||||
Poll::Pending
|
||||
} else {
|
||||
Poll::Ready(())
|
||||
}
|
||||
}
|
||||
}
|
embassy-stm32/src/dma/gpdma/ringbuffered.rs (new file, 332 lines)
@@ -0,0 +1,332 @@
|
||||
//! GPDMA ring buffer implementation.
|
||||
//!
|
||||
//! FIXME: Add request_pause functionality?
|
||||
//! FIXME: Stop the DMA, if a user does not queue new transfers (chain of linked-list items ends automatically).
|
||||
use core::future::poll_fn;
|
||||
use core::sync::atomic::{fence, Ordering};
|
||||
use core::task::Waker;
|
||||
|
||||
use embassy_hal_internal::Peri;
|
||||
|
||||
use super::{AnyChannel, TransferOptions, STATE};
|
||||
use crate::dma::gpdma::linked_list::{RunMode, Table};
|
||||
use crate::dma::ringbuffer::{DmaCtrl, Error, ReadableDmaRingBuffer, WritableDmaRingBuffer};
|
||||
use crate::dma::word::Word;
|
||||
use crate::dma::{Channel, Dir, Request};
|
||||
|
||||
struct DmaCtrlImpl<'a>(Peri<'a, AnyChannel>);
|
||||
|
||||
impl<'a> DmaCtrl for DmaCtrlImpl<'a> {
|
||||
fn get_remaining_transfers(&self) -> usize {
|
||||
let state = &STATE[self.0.id as usize];
|
||||
let current_remaining = self.0.get_remaining_transfers() as usize;
|
||||
|
||||
let lli_count = state.lli_state.count.load(Ordering::Acquire);
|
||||
|
||||
if lli_count > 0 {
|
||||
// In linked-list mode, the remaining transfers are the sum of the full lengths of LLIs that follow,
|
||||
// and the remaining transfers for the current LLI.
|
||||
let lli_index = state.lli_state.index.load(Ordering::Acquire);
|
||||
let single_transfer_count = state.lli_state.transfer_count.load(Ordering::Acquire) / lli_count;
|
||||
|
||||
(lli_count - lli_index - 1) * single_transfer_count + current_remaining
|
||||
} else {
|
||||
// No linked-list mode.
|
||||
current_remaining
|
||||
}
|
||||
}
|
||||
|
||||
fn reset_complete_count(&mut self) -> usize {
|
||||
let state = &STATE[self.0.id as usize];
|
||||
|
||||
state.complete_count.swap(0, Ordering::AcqRel)
|
||||
}
|
||||
|
||||
fn set_waker(&mut self, waker: &Waker) {
|
||||
STATE[self.0.id as usize].waker.register(waker);
|
||||
}
|
||||
}
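A worked example of the linked-list branch above (numbers are illustrative only): with lli_count = 2 and a total transfer_count of 512 words, each item covers 256 words; if the DMA is currently on item 0 (lli_index = 0) with 100 words still pending in BNDT, the result is (2 - 0 - 1) * 256 + 100 = 356 remaining words.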
|
||||
|
||||
/// Ringbuffer for receiving data using GPDMA linked-list mode.
|
||||
pub struct ReadableRingBuffer<'a, W: Word> {
|
||||
channel: Peri<'a, AnyChannel>,
|
||||
ringbuf: ReadableDmaRingBuffer<'a, W>,
|
||||
table: Table<2>,
|
||||
options: TransferOptions,
|
||||
}
|
||||
|
||||
impl<'a, W: Word> ReadableRingBuffer<'a, W> {
|
||||
/// Create a new ring buffer.
|
||||
///
|
||||
/// Transfer options are applied to the individual linked list items.
|
||||
pub unsafe fn new(
|
||||
channel: Peri<'a, impl Channel>,
|
||||
request: Request,
|
||||
peri_addr: *mut W,
|
||||
buffer: &'a mut [W],
|
||||
options: TransferOptions,
|
||||
) -> Self {
|
||||
let channel: Peri<'a, AnyChannel> = channel.into();
|
||||
let table = Table::<2>::new_ping_pong::<W>(request, peri_addr, buffer, Dir::PeripheralToMemory);
|
||||
|
||||
Self {
|
||||
channel,
|
||||
ringbuf: ReadableDmaRingBuffer::new(buffer),
|
||||
table,
|
||||
options,
|
||||
}
|
||||
}
|
||||
|
||||
/// Start the ring buffer operation.
|
||||
pub fn start(&mut self) {
|
||||
// Apply the default configuration to the channel.
|
||||
unsafe { self.channel.configure_linked_list(&self.table, self.options) };
|
||||
self.table.link(RunMode::Circular);
|
||||
self.channel.start();
|
||||
}
|
||||
|
||||
/// Clear all data in the ring buffer.
|
||||
pub fn clear(&mut self) {
|
||||
self.ringbuf.reset(&mut DmaCtrlImpl(self.channel.reborrow()));
|
||||
}
|
||||
|
||||
/// Read elements from the ring buffer
|
||||
/// Return a tuple of the length read and the length remaining in the buffer
|
||||
/// If not all of the elements were read, then there will be some elements in the buffer remaining
|
||||
/// The length remaining is the capacity, ring_buf.len(), less the elements remaining after the read
|
||||
/// Error is returned if the portion to be read was overwritten by the DMA controller.
|
||||
pub fn read(&mut self, buf: &mut [W]) -> Result<(usize, usize), Error> {
|
||||
self.ringbuf.read(&mut DmaCtrlImpl(self.channel.reborrow()), buf)
|
||||
}
|
||||
|
||||
/// Read an exact number of elements from the ringbuffer.
|
||||
///
|
||||
/// Returns the remaining number of elements available for immediate reading.
|
||||
/// Error is returned if the portion to be read was overwritten by the DMA controller.
|
||||
///
|
||||
/// Async/Wake Behavior:
|
||||
/// The underlying DMA peripheral only can wake us when its buffer pointer has reached the halfway point,
|
||||
/// and when it wraps around. This means that when called with a buffer of length 'M', when this
|
||||
/// ring buffer was created with a buffer of size 'N':
|
||||
/// - If M equals N/2 or N/2 divides evenly into M, this function will return every N/2 elements read on the DMA source.
|
||||
/// - Otherwise, this function may need up to N/2 extra elements to arrive before returning.
|
||||
pub async fn read_exact(&mut self, buffer: &mut [W]) -> Result<usize, Error> {
|
||||
self.ringbuf
|
||||
.read_exact(&mut DmaCtrlImpl(self.channel.reborrow()), buffer)
|
||||
.await
|
||||
}
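A minimal usage sketch for the readable ring buffer, not part of this file; the channel, request, peripheral address and buffers are placeholder names, and the code is assumed to run in an async task:

    // Create a ring buffer over a 512-word backing buffer, start it, and then
    // drain it in half-buffer chunks so read_exact wakes on every half/full event.
    let mut ring = unsafe { ReadableRingBuffer::new(dma_ch, request, periph_dr, &mut backing_buf, TransferOptions::default()) };
    ring.start();
    let mut chunk = [0u16; 256];
    loop {
        // Errors if the DMA overwrote data that was not yet read.
        let _read = ring.read_exact(&mut chunk).await.unwrap();
        // ... process `chunk` ...
    }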
|
||||
|
||||
/// The current length of the ringbuffer
|
||||
pub fn len(&mut self) -> Result<usize, Error> {
|
||||
Ok(self.ringbuf.len(&mut DmaCtrlImpl(self.channel.reborrow()))?)
|
||||
}
|
||||
|
||||
/// The capacity of the ringbuffer
|
||||
pub const fn capacity(&self) -> usize {
|
||||
self.ringbuf.cap()
|
||||
}
|
||||
|
||||
/// Set a waker to be woken when at least one byte is received.
|
||||
pub fn set_waker(&mut self, waker: &Waker) {
|
||||
DmaCtrlImpl(self.channel.reborrow()).set_waker(waker);
|
||||
}
|
||||
|
||||
/// Request the transfer to pause, keeping the existing configuration for this channel.
|
||||
///
|
||||
/// To resume the transfer, call [`request_resume`](Self::request_resume) again.
|
||||
/// This doesn't immediately stop the transfer, you have to wait until [`is_running`](Self::is_running) returns false.
|
||||
pub fn request_pause(&mut self) {
|
||||
self.channel.request_pause()
|
||||
}
|
||||
|
||||
/// Request the transfer to resume after having been paused.
|
||||
pub fn request_resume(&mut self) {
|
||||
self.channel.request_resume()
|
||||
}

/// Request the DMA to reset.
///
/// The configuration for this channel will **not be preserved**. If you need to restart the transfer
/// at a later point with the same configuration, see [`request_pause`](Self::request_pause) instead.
pub fn request_reset(&mut self) {
self.channel.request_reset()
}

/// Return whether this transfer is still running.
///
/// If this returns `false`, it can be because either the transfer finished, or
/// it was requested to stop early with [`request_pause`](Self::request_pause).
pub fn is_running(&mut self) -> bool {
self.channel.is_running()
}

/// Stop the DMA transfer and await until the buffer is full.
///
/// This disables the DMA transfer's circular mode so that the transfer
/// stops when the buffer is full.
///
/// This is designed to be used with streaming input data such as the
/// I2S/SAI or ADC.
pub async fn stop(&mut self) {
// wait until cr.susp reads as true
poll_fn(|cx| {
self.set_waker(cx.waker());
self.channel.poll_stop()
})
.await
}
}
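
// Illustration (not part of the diff): how the N/2 wake granularity from the
// `read_exact` docs plays out in practice. Assumes `ring` was created over a
// 512-element DMA buffer (N = 512), so the DMA can only wake us every 256 elements.
//
// let mut chunk = [0u16; 256]; // M = N/2: read_exact resolves on each half-transfer event
// loop {
//     let _still_available = ring.read_exact(&mut chunk).await?;
//     // process `chunk` here before the DMA wraps around again
// }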

impl<'a, W: Word> Drop for ReadableRingBuffer<'a, W> {
fn drop(&mut self) {
self.request_pause();
while self.is_running() {}

// "Subsequent reads and writes cannot be moved ahead of preceding reads."
fence(Ordering::SeqCst);
}
}

/// Ringbuffer for writing data using GPDMA linked-list mode.
pub struct WritableRingBuffer<'a, W: Word> {
channel: Peri<'a, AnyChannel>,
ringbuf: WritableDmaRingBuffer<'a, W>,
table: Table<2>,
options: TransferOptions,
}

impl<'a, W: Word> WritableRingBuffer<'a, W> {
/// Create a new ring buffer.
///
/// Transfer options are applied to the individual linked list items.
pub unsafe fn new(
channel: Peri<'a, impl Channel>,
request: Request,
peri_addr: *mut W,
buffer: &'a mut [W],
options: TransferOptions,
) -> Self {
let channel: Peri<'a, AnyChannel> = channel.into();
let table = Table::<2>::new_ping_pong::<W>(request, peri_addr, buffer, Dir::MemoryToPeripheral);

Self {
channel,
ringbuf: WritableDmaRingBuffer::new(buffer),
table,
options,
}
}

/// Start the ring buffer operation.
pub fn start(&mut self) {
// Apply the default configuration to the channel.
unsafe { self.channel.configure_linked_list(&self.table, self.options) };
self.table.link(RunMode::Circular);

self.channel.start();
}

/// Clear all data in the ring buffer.
pub fn clear(&mut self) {
self.ringbuf.reset(&mut DmaCtrlImpl(self.channel.reborrow()));
}

/// Write elements directly to the raw buffer.
/// This can be used to fill the buffer before starting the DMA transfer.
pub fn write_immediate(&mut self, buf: &[W]) -> Result<(usize, usize), Error> {
self.ringbuf.write_immediate(buf)
}
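
// Illustration (not part of the diff): pre-filling the ring before the transfer is
// started, as suggested by the `write_immediate` docs. `first_samples` is a placeholder slice.
//
// let (written, _remaining) = ring.write_immediate(&first_samples)?;
// ring.start(); // only enable the linked-list transfer once valid data is in place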

/// Write elements to the ring buffer.
/// Returns a tuple of the number of elements written and the length remaining in the buffer.
pub fn write(&mut self, buf: &[W]) -> Result<(usize, usize), Error> {
self.ringbuf.write(&mut DmaCtrlImpl(self.channel.reborrow()), buf)
}

/// Write an exact number of elements to the ringbuffer.
pub async fn write_exact(&mut self, buffer: &[W]) -> Result<usize, Error> {
self.ringbuf
.write_exact(&mut DmaCtrlImpl(self.channel.reborrow()), buffer)
.await
}

/// Wait for any ring buffer write error.
pub async fn wait_write_error(&mut self) -> Result<usize, Error> {
self.ringbuf
.wait_write_error(&mut DmaCtrlImpl(self.channel.reborrow()))
.await
}

/// The current length of the ringbuffer
pub fn len(&mut self) -> Result<usize, Error> {
Ok(self.ringbuf.len(&mut DmaCtrlImpl(self.channel.reborrow()))?)
}

/// The capacity of the ringbuffer
pub const fn capacity(&self) -> usize {
self.ringbuf.cap()
}

/// Set a waker to be woken when at least one byte is sent.
pub fn set_waker(&mut self, waker: &Waker) {
DmaCtrlImpl(self.channel.reborrow()).set_waker(waker);
}

/// Request the DMA to suspend.
///
/// To resume the transfer, call [`request_resume`](Self::request_resume).
///
/// This doesn't immediately stop the transfer; you have to wait until [`is_running`](Self::is_running) returns false.
pub fn request_pause(&mut self) {
self.channel.request_pause()
}

/// Request the DMA to resume transfers after being suspended.
pub fn request_resume(&mut self) {
self.channel.request_resume()
}

/// Request the DMA to reset.
///
/// The configuration for this channel will **not be preserved**. If you need to restart the transfer
/// at a later point with the same configuration, see [`request_pause`](Self::request_pause) instead.
pub fn request_reset(&mut self) {
self.channel.request_reset()
}

/// Return whether DMA is still running.
///
/// If this returns `false`, it can be because either the transfer finished, or
/// it was requested to stop early with [`request_pause`](Self::request_pause).
pub fn is_running(&mut self) -> bool {
self.channel.is_running()
}

/// Stop the DMA transfer and await until the remaining data in the buffer has been transferred.
///
/// This disables the DMA transfer's circular mode so that the transfer
/// stops once it reaches the end of the buffer instead of wrapping around.
///
/// This is designed to be used with streaming output data such as the
/// I2S/SAI.
///
/// When using the UART, you probably want `request_pause()`.
pub async fn stop(&mut self) {
// wait until cr.susp reads as true
poll_fn(|cx| {
self.set_waker(cx.waker());
self.channel.poll_stop()
})
.await
}
}

impl<'a, W: Word> Drop for WritableRingBuffer<'a, W> {
fn drop(&mut self) {
self.request_pause();
while self.is_running() {}

// "Subsequent reads and writes cannot be moved ahead of preceding reads."
fence(Ordering::SeqCst);
}
}
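
For orientation, a minimal usage sketch of the new writable ring buffer (not taken from the diff); `p.GPDMA1_CH0`, `request` and `dr_addr` are placeholders for a concrete GPDMA channel, DMA request line and peripheral data register:

let mut tx_buf = [0u16; 1024];
let mut ring = unsafe {
    WritableRingBuffer::new(
        p.GPDMA1_CH0,            // placeholder GPDMA channel
        request,                 // placeholder DMA request line for the target peripheral
        dr_addr as *mut u16,     // placeholder peripheral data register address
        &mut tx_buf,
        TransferOptions::default(),
    )
};
ring.start();
loop {
    ring.write_exact(&samples).await.unwrap(); // `samples` is a placeholder data slice
}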

@ -9,6 +9,8 @@ pub use dma_bdma::*;
#[cfg(gpdma)]
pub(crate) mod gpdma;
#[cfg(gpdma)]
pub use gpdma::ringbuffered::*;
#[cfg(gpdma)]
pub use gpdma::*;

#[cfg(dmamux)]
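
With these re-exports, GPDMA parts expose the ring buffer types under the same path as the existing DMA/BDMA implementation, so driver and application code can keep a single import (sketch):

use embassy_stm32::dma::{ReadableRingBuffer, TransferOptions, WritableRingBuffer};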

@ -26,10 +28,13 @@ use embassy_hal_internal::{impl_peripheral, PeripheralType};

use crate::interrupt;

/// The direction of a DMA transfer.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
enum Dir {
pub enum Dir {
/// Transfer from memory to a peripheral.
MemoryToPeripheral,
/// Transfer from a peripheral to memory.
PeripheralToMemory,
}
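
Making `Dir` public lets code outside the dma module name transfer directions, for example in small helpers; a sketch (the helper itself is hypothetical):

use embassy_stm32::dma::Dir;

// Hypothetical helper: true for peripheral-to-memory (receive) transfers.
fn is_receive(dir: Dir) -> bool {
    matches!(dir, Dir::PeripheralToMemory)
}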

@ -1,5 +1,3 @@
#![cfg_attr(gpdma, allow(unused))]

use core::future::poll_fn;
use core::task::{Poll, Waker};

@ -285,17 +283,20 @@ impl<'a, W: Word> WritableDmaRingBuffer<'a, W> {
}

/// Write an exact number of elements to the ringbuffer.
///
/// Returns the remaining write capacity in the buffer.
#[allow(dead_code)]
pub async fn write_exact(&mut self, dma: &mut impl DmaCtrl, buffer: &[W]) -> Result<usize, Error> {
let mut written_data = 0;
let mut written_len = 0;
let buffer_len = buffer.len();

poll_fn(|cx| {
dma.set_waker(cx.waker());

match self.write(dma, &buffer[written_data..buffer_len]) {
match self.write(dma, &buffer[written_len..buffer_len]) {
Ok((len, remaining)) => {
written_data += len;
if written_data == buffer_len {
written_len += len;
if written_len == buffer_len {
Poll::Ready(Ok(remaining))
} else {
Poll::Pending

@ -1283,7 +1283,7 @@ impl<'d> I2c<'d, Async, MultiMaster> {
} else if isr.stopf() {
self.info.regs.icr().write(|reg| reg.set_stopcf(true));
if remaining_len > 0 {
dma_transfer.request_stop();
dma_transfer.request_pause();
Poll::Ready(Ok(SendStatus::LeftoverBytes(remaining_len as usize)))
} else {
Poll::Ready(Ok(SendStatus::Done))
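
Call sites like this one migrate mechanically from the old name to the new pause API; in sketch form, the shutdown sequence stays the same apart from the rename:

// Old: dma_transfer.request_stop();
dma_transfer.request_pause();      // suspend the channel, configuration preserved
// If the caller needs the channel to actually be idle, it still waits:
while dma_transfer.is_running() {}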

@ -1,13 +1,11 @@
//! Serial Audio Interface (SAI)
#![macro_use]
#![cfg_attr(gpdma, allow(unused))]

use core::marker::PhantomData;

use embassy_hal_internal::PeripheralType;

pub use crate::dma::word;
#[cfg(not(gpdma))]
use crate::dma::{ringbuffer, Channel, ReadableRingBuffer, Request, TransferOptions, WritableRingBuffer};
use crate::gpio::{AfType, AnyPin, OutputType, Pull, SealedPin as _, Speed};
use crate::pac::sai::{vals, Sai as Regs};

@ -26,7 +24,6 @@ pub enum Error {
Overrun,
}

#[cfg(not(gpdma))]
impl From<ringbuffer::Error> for Error {
fn from(#[allow(unused)] err: ringbuffer::Error) -> Self {
#[cfg(feature = "defmt")]

@ -652,7 +649,6 @@ impl Config {
}
}

#[cfg(not(gpdma))]
enum RingBuffer<'d, W: word::Word> {
Writable(WritableRingBuffer<'d, W>),
Readable(ReadableRingBuffer<'d, W>),

@ -679,7 +675,6 @@ fn get_af_types(mode: Mode, tx_rx: TxRx) -> (AfType, AfType) {
)
}

#[cfg(not(gpdma))]
fn get_ring_buffer<'d, T: Instance, W: word::Word>(
dma: Peri<'d, impl Channel>,
dma_buf: &'d mut [W],

@ -750,14 +745,10 @@ pub struct Sai<'d, T: Instance, W: word::Word> {
fs: Option<Peri<'d, AnyPin>>,
sck: Option<Peri<'d, AnyPin>>,
mclk: Option<Peri<'d, AnyPin>>,
#[cfg(gpdma)]
ring_buffer: PhantomData<W>,
#[cfg(not(gpdma))]
ring_buffer: RingBuffer<'d, W>,
sub_block: WhichSubBlock,
}

#[cfg(not(gpdma))]
impl<'d, T: Instance, W: word::Word> Sai<'d, T, W> {
/// Create a new SAI driver in asynchronous mode with MCLK.
///

@ -8,9 +8,7 @@ use embassy_sync::waitqueue::AtomicWaker;

use crate::dma::ringbuffer::Error as RingbufferError;
pub use crate::dma::word;
#[cfg(not(gpdma))]
use crate::dma::ReadableRingBuffer;
use crate::dma::{Channel, TransferOptions};
use crate::dma::{Channel, ReadableRingBuffer, TransferOptions};
use crate::gpio::{AfType, AnyPin, Pull, SealedPin as _};
use crate::interrupt::typelevel::Interrupt;
use crate::pac::spdifrx::Spdifrx as Regs;

@ -58,7 +56,6 @@ macro_rules! impl_spdifrx_pin {
/// Ring-buffered SPDIFRX driver.
///
/// Data is read by DMAs and stored in a ring buffer.
#[cfg(not(gpdma))]
pub struct Spdifrx<'d, T: Instance> {
_peri: Peri<'d, T>,
spdifrx_in: Option<Peri<'d, AnyPin>>,

@ -118,7 +115,6 @@ impl Default for Config {
}
}

#[cfg(not(gpdma))]
impl<'d, T: Instance> Spdifrx<'d, T> {
fn dma_opts() -> TransferOptions {
TransferOptions {

@ -236,7 +232,6 @@ impl<'d, T: Instance> Spdifrx<'d, T> {
}
}

#[cfg(not(gpdma))]
impl<'d, T: Instance> Drop for Spdifrx<'d, T> {
fn drop(&mut self) {
T::info().regs.cr().modify(|cr| cr.set_spdifen(0x00));

@ -490,14 +490,14 @@ impl<'d, T: Instance> PdPhy<'d, T> {
let sr = r.sr().read();

if sr.rxhrstdet() {
dma.request_stop();
dma.request_pause();

// Clean and re-enable hard reset receive interrupt.
r.icr().write(|w| w.set_rxhrstdetcf(true));
r.imr().modify(|w| w.set_rxhrstdetie(true));
Poll::Ready(Err(RxError::HardReset))
} else if sr.rxmsgend() {
dma.request_stop();
dma.request_pause();
// Should be read immediately on interrupt.
rxpaysz = r.rx_payszr().read().rxpaysz().into();

@ -1965,9 +1965,7 @@ pub use buffered::*;
pub use crate::usart::buffered::InterruptHandler as BufferedInterruptHandler;
mod buffered;

#[cfg(not(gpdma))]
mod ringbuffered;
#[cfg(not(gpdma))]
pub use ringbuffered::RingBufferedUartRx;

#[cfg(any(usart_v1, usart_v2))]

@ -381,7 +381,7 @@ impl ReadReady for RingBufferedUartRx<'_> {
crate::dma::ringbuffer::Error::Overrun => Self::Error::Overrun,
crate::dma::ringbuffer::Error::DmaUnsynced => {
error!(
"Ringbuffer error: DmaUNsynced, driver implementation is
"Ringbuffer error: DmaUNsynced, driver implementation is
probably bugged please open an issue"
);
// we report this as overrun since its recoverable in the same way

examples/stm32h5/src/bin/sai.rs (new file, 52 lines)

@ -0,0 +1,52 @@
#![no_std]
#![no_main]

use defmt::info;
use embassy_executor::Spawner;
use embassy_stm32::{sai, Config};
use {defmt_rtt as _, panic_probe as _};

#[embassy_executor::main]
async fn main(_spawner: Spawner) {
info!("Hello world.");

let mut config = Config::default();
{
use embassy_stm32::rcc::*;

config.rcc.pll2 = Some(Pll {
source: PllSource::HSI,
prediv: PllPreDiv::DIV16,
mul: PllMul::MUL32,
divp: Some(PllDiv::DIV16), // 8 MHz SAI clock
divq: None,
divr: None,
});

config.rcc.mux.sai1sel = mux::Saisel::PLL2_P;
}
let p = embassy_stm32::init(config);

let mut write_buffer = [0u16; 1024];
let (_, sai_b) = sai::split_subblocks(p.SAI1);

let mut sai_b = sai::Sai::new_asynchronous(
sai_b,
p.PF8,
p.PE3,
p.PF9,
p.GPDMA1_CH0,
&mut write_buffer,
Default::default(),
);

// Populate arbitrary data.
let mut data = [0u16; 256];
for (index, sample) in data.iter_mut().enumerate() {
*sample = index as u16;
}

loop {
sai_b.write(&data).await.unwrap();
}
}