#![macro_use]

use core::future::Future;
use core::pin::Pin;
use core::sync::atomic::{fence, Ordering};
use core::task::{Context, Poll};

use embassy_hal_internal::Peri;
use embassy_sync::waitqueue::AtomicWaker;

use super::word::{Word, WordSize};
use super::{AnyChannel, Channel, Dir, Request, STATE};
use crate::interrupt::typelevel::Interrupt;
use crate::interrupt::Priority;
use crate::pac;
use crate::pac::gpdma::vals;

pub(crate) struct ChannelInfo {
    pub(crate) dma: pac::gpdma::Gpdma,
    pub(crate) num: usize,
    #[cfg(feature = "_dual-core")]
    pub(crate) irq: pac::Interrupt,
}

/// GPDMA transfer options.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[non_exhaustive]
pub struct TransferOptions {}

impl Default for TransferOptions {
    fn default() -> Self {
        Self {}
    }
}

impl From<WordSize> for vals::Dw {
    fn from(raw: WordSize) -> Self {
        match raw {
            WordSize::OneByte => Self::BYTE,
            WordSize::TwoBytes => Self::HALF_WORD,
            WordSize::FourBytes => Self::WORD,
        }
    }
}

pub(crate) struct ChannelState {
    waker: AtomicWaker,
}

impl ChannelState {
    pub(crate) const NEW: Self = Self {
        waker: AtomicWaker::new(),
    };
}

/// safety: must be called only once
pub(crate) unsafe fn init(cs: critical_section::CriticalSection, irq_priority: Priority) {
    foreach_interrupt! {
        ($peri:ident, gpdma, $block:ident, $signal_name:ident, $irq:ident) => {
            crate::interrupt::typelevel::$irq::set_priority_with_cs(cs, irq_priority);
            #[cfg(not(feature = "_dual-core"))]
            crate::interrupt::typelevel::$irq::enable();
        };
    }
    crate::_generated::init_gpdma();
}

impl AnyChannel {
    /// Safety: Must be called with a matching set of parameters for a valid dma channel
    pub(crate) unsafe fn on_irq(&self) {
        let info = self.info();
        #[cfg(feature = "_dual-core")]
        {
            use embassy_hal_internal::interrupt::InterruptExt as _;
            info.irq.enable();
        }

        let state = &STATE[self.id as usize];

        let ch = info.dma.ch(info.num);
        let sr = ch.sr().read();

        if sr.dtef() {
            panic!(
                "DMA: data transfer error on DMA@{:08x} channel {}",
                info.dma.as_ptr() as u32,
                info.num
            );
        }
        if sr.usef() {
            panic!(
                "DMA: user settings error on DMA@{:08x} channel {}",
                info.dma.as_ptr() as u32,
                info.num
            );
        }

        if sr.suspf() || sr.tcf() {
            // disable all xxIEs to prevent the irq from firing again.
            ch.cr().write(|_| {});

            // Wake the future. It'll look at tcf and see it's set.
            state.waker.wake();
        }
    }
}

/// DMA transfer.
#[must_use = "futures do nothing unless you `.await` or poll them"]
pub struct Transfer<'a> {
    channel: Peri<'a, AnyChannel>,
}

impl<'a> Transfer<'a> {
    /// Create a new read DMA transfer (peripheral to memory).
    pub unsafe fn new_read<W: Word>(
        channel: Peri<'a, impl Channel>,
        request: Request,
        peri_addr: *mut W,
        buf: &'a mut [W],
        options: TransferOptions,
    ) -> Self {
        Self::new_read_raw(channel, request, peri_addr, buf, options)
    }

    /// Create a new read DMA transfer (peripheral to memory), using raw pointers.
    pub unsafe fn new_read_raw<MW: Word, PW: Word>(
        channel: Peri<'a, impl Channel>,
        request: Request,
        peri_addr: *mut PW,
        buf: *mut [MW],
        options: TransferOptions,
    ) -> Self {
        Self::new_inner(
            channel.into(),
            request,
            Dir::PeripheralToMemory,
            peri_addr as *const u32,
            buf as *mut MW as *mut u32,
            buf.len(),
            true,
            PW::size(),
            MW::size(),
            options,
        )
    }

    /// Create a new write DMA transfer (memory to peripheral).
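    ///
    /// A minimal usage sketch (illustrative only: `dma_ch`, `REQUEST_TX` and `tdr` are
    /// placeholders, not identifiers defined by this crate):
    ///
    /// ```ignore
    /// // Assumed setup: `dma_ch` is a GPDMA channel, `REQUEST_TX` is the DMA request
    /// // line of the target peripheral, and `tdr` points at its data register.
    /// let buf = [0u8; 16];
    /// let transfer = unsafe {
    ///     Transfer::new_write(dma_ch, REQUEST_TX, &buf, tdr, TransferOptions::default())
    /// };
    /// transfer.await; // completes once the transfer-complete flag (TCF) is raised
    /// ```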
    pub unsafe fn new_write<MW: Word, PW: Word>(
        channel: Peri<'a, impl Channel>,
        request: Request,
        buf: &'a [MW],
        peri_addr: *mut PW,
        options: TransferOptions,
    ) -> Self {
        Self::new_write_raw(channel, request, buf, peri_addr, options)
    }

    /// Create a new write DMA transfer (memory to peripheral), using raw pointers.
    pub unsafe fn new_write_raw<MW: Word, PW: Word>(
        channel: Peri<'a, impl Channel>,
        request: Request,
        buf: *const [MW],
        peri_addr: *mut PW,
        options: TransferOptions,
    ) -> Self {
        Self::new_inner(
            channel.into(),
            request,
            Dir::MemoryToPeripheral,
            peri_addr as *const u32,
            buf as *const MW as *mut u32,
            buf.len(),
            true,
            MW::size(),
            PW::size(),
            options,
        )
    }

    /// Create a new write DMA transfer (memory to peripheral), writing the same value repeatedly.
    pub unsafe fn new_write_repeated<MW: Word, PW: Word>(
        channel: Peri<'a, impl Channel>,
        request: Request,
        repeated: &'a MW,
        count: usize,
        peri_addr: *mut PW,
        options: TransferOptions,
    ) -> Self {
        Self::new_inner(
            channel.into(),
            request,
            Dir::MemoryToPeripheral,
            peri_addr as *const u32,
            repeated as *const MW as *mut u32,
            count,
            false,
            MW::size(),
            PW::size(),
            options,
        )
    }

    unsafe fn new_inner(
        channel: Peri<'a, AnyChannel>,
        request: Request,
        dir: Dir,
        peri_addr: *const u32,
        mem_addr: *mut u32,
        mem_len: usize,
        incr_mem: bool,
        data_size: WordSize,
        dst_size: WordSize,
        _options: TransferOptions,
    ) -> Self {
        // BNDT is specified as bytes, not as number of transfers.
        let Ok(bndt) = (mem_len * data_size.bytes()).try_into() else {
            panic!("DMA transfers may not be larger than 65535 bytes.");
        };

        let info = channel.info();
        let ch = info.dma.ch(info.num);

        // "Preceding reads and writes cannot be moved past subsequent writes."
        fence(Ordering::SeqCst);

        let this = Self { channel };

        ch.cr().write(|w| w.set_reset(true));
        ch.fcr().write(|w| w.0 = 0xFFFF_FFFF); // clear all irqs
        ch.llr().write(|_| {}); // no linked list
        ch.tr1().write(|w| {
            w.set_sdw(data_size.into());
            w.set_ddw(dst_size.into());
            w.set_sinc(dir == Dir::MemoryToPeripheral && incr_mem);
            w.set_dinc(dir == Dir::PeripheralToMemory && incr_mem);
        });
        ch.tr2().write(|w| {
            w.set_dreq(match dir {
                Dir::MemoryToPeripheral => vals::Dreq::DESTINATION_PERIPHERAL,
                Dir::PeripheralToMemory => vals::Dreq::SOURCE_PERIPHERAL,
            });
            w.set_reqsel(request);
        });
        ch.tr3().write(|_| {}); // no address offsets.
        ch.br1().write(|w| w.set_bndt(bndt));

        match dir {
            Dir::MemoryToPeripheral => {
                ch.sar().write_value(mem_addr as _);
                ch.dar().write_value(peri_addr as _);
            }
            Dir::PeripheralToMemory => {
                ch.sar().write_value(peri_addr as _);
                ch.dar().write_value(mem_addr as _);
            }
        }

        ch.cr().write(|w| {
            // Enable interrupts
            w.set_tcie(true);
            w.set_useie(true);
            w.set_dteie(true);
            w.set_suspie(true);

            // Start it
            w.set_en(true);
        });

        this
    }

    /// Request the transfer to stop.
    ///
    /// This doesn't immediately stop the transfer; you have to wait until
    /// [`is_running`](Self::is_running) returns false.
    pub fn request_stop(&mut self) {
        let info = self.channel.info();
        let ch = info.dma.ch(info.num);
        ch.cr().modify(|w| w.set_susp(true))
    }

    /// Return whether this transfer is still running.
    ///
    /// If this returns `false`, it can be because either the transfer finished, or
    /// it was requested to stop early with [`request_stop`](Self::request_stop).
    pub fn is_running(&mut self) -> bool {
        let info = self.channel.info();
        let ch = info.dma.ch(info.num);
        let sr = ch.sr().read();
        !sr.tcf() && !sr.suspf()
    }

    /// Gets the total remaining transfers for the channel.
    ///
    /// Note: this will be zero for transfers that completed without cancellation.
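    ///
    /// A sketch of early cancellation (illustrative; `transfer` stands for any
    /// in-flight, mutably bound [`Transfer`] created with the constructors above):
    ///
    /// ```ignore
    /// transfer.request_stop();
    /// while transfer.is_running() {}                       // wait for the suspend to take effect
    /// let remaining = transfer.get_remaining_transfers();  // leftover BNDT count
    /// ```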
    pub fn get_remaining_transfers(&self) -> u16 {
        let info = self.channel.info();
        let ch = info.dma.ch(info.num);
        ch.br1().read().bndt()
    }

    /// Blocking wait until the transfer finishes.
    pub fn blocking_wait(mut self) {
        while self.is_running() {}

        // "Subsequent reads and writes cannot be moved ahead of preceding reads."
        fence(Ordering::SeqCst);

        core::mem::forget(self);
    }
}

impl<'a> Drop for Transfer<'a> {
    fn drop(&mut self) {
        self.request_stop();
        while self.is_running() {}

        // "Subsequent reads and writes cannot be moved ahead of preceding reads."
        fence(Ordering::SeqCst);
    }
}

impl<'a> Unpin for Transfer<'a> {}
impl<'a> Future for Transfer<'a> {
    type Output = ();
    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        let state = &STATE[self.channel.id as usize];

        state.waker.register(cx.waker());

        if self.is_running() {
            Poll::Pending
        } else {
            Poll::Ready(())
        }
    }
}
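
// Illustrative completion patterns (a sketch, not taken from this crate's examples;
// `transfer` stands for any `Transfer` built with the constructors above):
//
//     transfer.await;           // async: the GPDMA IRQ wakes the waker registered in `poll`
//     // ...or, synchronously:
//     transfer.blocking_wait(); // spins on `is_running`, then forgets `self` so `Drop`
//                               // doesn't issue another stop request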