Merge pull request #4338 from i509VCB/dma

mspm0: add dma driver
This commit is contained in:
i509VCB 2025-07-07 03:02:48 +00:00 committed by GitHub
commit f56197c51e
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
11 changed files with 1279 additions and 25 deletions

View File

@ -46,14 +46,14 @@ cortex-m = "0.7.6"
critical-section = "1.2.0"
# mspm0-metapac = { version = "" }
mspm0-metapac = { git = "https://github.com/mspm0-rs/mspm0-data-generated/", tag = "mspm0-data-26a6f681eda4ef120e8cb614a1631727c848590f" }
mspm0-metapac = { git = "https://github.com/mspm0-rs/mspm0-data-generated/", tag = "mspm0-data-235158ac2865d8aac3a1eceb2d62026eb12bf38f" }
[build-dependencies]
proc-macro2 = "1.0.94"
quote = "1.0.40"
# mspm0-metapac = { version = "", default-features = false, features = ["metadata"] }
mspm0-metapac = { git = "https://github.com/mspm0-rs/mspm0-data-generated/", tag = "mspm0-data-26a6f681eda4ef120e8cb614a1631727c848590f", default-features = false, features = ["metadata"] }
mspm0-metapac = { git = "https://github.com/mspm0-rs/mspm0-data-generated/", tag = "mspm0-data-235158ac2865d8aac3a1eceb2d62026eb12bf38f", default-features = false, features = ["metadata"] }
[features]
default = ["rt"]

View File

@ -67,6 +67,7 @@ fn generate_code() {
g.extend(generate_peripheral_instances());
g.extend(generate_pin_trait_impls());
g.extend(generate_groups());
g.extend(generate_dma_channel_count());
let out_dir = &PathBuf::from(env::var_os("OUT_DIR").unwrap());
let out_file = out_dir.join("_generated.rs").to_string_lossy().to_string();
@ -209,6 +210,12 @@ fn generate_groups() -> TokenStream {
}
}
/// Emits `pub const DMA_CHANNELS: usize = N;` into the generated code so the HAL
/// can size per-channel static state (e.g. wakers) at compile time.
fn generate_dma_channel_count() -> TokenStream {
    let count = METADATA.dma_channels.len();
    quote! { pub const DMA_CHANNELS: usize = #count; }
}
#[derive(Debug, Clone)]
struct Singleton {
name: String,
@ -543,8 +550,6 @@ fn generate_peripheral_instances() -> TokenStream {
for peripheral in METADATA.peripherals {
let peri = format_ident!("{}", peripheral.name);
// Will be filled in when uart implementation is finished
let _ = peri;
let tokens = match peripheral.kind {
"uart" => Some(quote! { impl_uart_instance!(#peri); }),
_ => None,
@ -555,6 +560,18 @@ fn generate_peripheral_instances() -> TokenStream {
}
}
// DMA channels
for dma_channel in METADATA.dma_channels.iter() {
let peri = format_ident!("DMA_CH{}", dma_channel.number);
let num = dma_channel.number;
if dma_channel.full {
impls.push(quote! { impl_full_dma_channel!(#peri, #num); });
} else {
impls.push(quote! { impl_dma_channel!(#peri, #num); });
}
}
quote! {
#(#impls)*
}

626
embassy-mspm0/src/dma.rs Normal file
View File

@ -0,0 +1,626 @@
//! Direct Memory Access (DMA)
#![macro_use]
use core::future::Future;
use core::mem;
use core::pin::Pin;
use core::sync::atomic::{compiler_fence, Ordering};
use core::task::{Context, Poll};
use critical_section::CriticalSection;
use embassy_hal_internal::interrupt::InterruptExt;
use embassy_hal_internal::{impl_peripheral, PeripheralType};
use embassy_sync::waitqueue::AtomicWaker;
use mspm0_metapac::common::{Reg, RW};
use mspm0_metapac::dma::regs;
use mspm0_metapac::dma::vals::{self, Autoen, Em, Incr, Preirq, Wdth};
use crate::{interrupt, pac, Peri};
/// The burst size of a DMA transfer.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum BurstSize {
/// The whole block transfer is completed in one transfer without interruption.
Complete,
/// The burst size is 8, after 9 transfers the block transfer is interrupted and the priority
/// is reevaluated.
_8,
/// The burst size is 16, after 17 transfers the block transfer is interrupted and the priority
/// is reevaluated.
_16,
/// The burst size is 32, after 32 transfers the block transfer is interrupted and the priority
/// is reevaluated.
_32,
}
/// DMA channel.
#[allow(private_bounds)]
pub trait Channel: Into<AnyChannel> + PeripheralType {}
/// Full DMA channel.
#[allow(private_bounds)]
pub trait FullChannel: Channel + Into<AnyFullChannel> {}
/// Type-erased DMA channel.
pub struct AnyChannel {
pub(crate) id: u8,
}
impl_peripheral!(AnyChannel);
impl SealedChannel for AnyChannel {
fn id(&self) -> u8 {
self.id
}
}
impl Channel for AnyChannel {}
/// Type-erased full DMA channel.
pub struct AnyFullChannel {
pub(crate) id: u8,
}
impl_peripheral!(AnyFullChannel);
impl SealedChannel for AnyFullChannel {
fn id(&self) -> u8 {
self.id
}
}
impl Channel for AnyFullChannel {}
impl FullChannel for AnyFullChannel {}
impl From<AnyFullChannel> for AnyChannel {
fn from(value: AnyFullChannel) -> Self {
Self { id: value.id }
}
}
/// A primitive value that the DMA hardware can move.
#[allow(private_bounds)]
pub trait Word: SealedWord {
    /// Size in bytes for the width.
    // NOTE(review): a size in bytes is never negative, so `usize` would be the more
    // natural return type — kept as `isize` to avoid breaking existing callers.
    fn size() -> isize;
}
impl SealedWord for u8 {
fn width() -> vals::Wdth {
vals::Wdth::BYTE
}
}
impl Word for u8 {
fn size() -> isize {
1
}
}
impl SealedWord for u16 {
fn width() -> vals::Wdth {
vals::Wdth::HALF
}
}
impl Word for u16 {
fn size() -> isize {
2
}
}
impl SealedWord for u32 {
fn width() -> vals::Wdth {
vals::Wdth::WORD
}
}
impl Word for u32 {
fn size() -> isize {
4
}
}
impl SealedWord for u64 {
fn width() -> vals::Wdth {
vals::Wdth::LONG
}
}
impl Word for u64 {
fn size() -> isize {
8
}
}
// TODO: u128 (LONGLONG) support. G350x does support it, but other parts do not such as C110x. More metadata is
// needed to properly enable this.
// impl SealedWord for u128 {
// fn width() -> vals::Wdth {
// vals::Wdth::LONGLONG
// }
// }
// impl Word for u128 {
// fn size() -> isize {
// 16
// }
// }
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub enum Error {
    /// The DMA transfer is too large.
    ///
    /// The transfer count is measured in transferred values, not bytes: moving 1000
    /// `u8`s and 1000 `u64`s both count as 1000 transfers. `verify_transfer` rejects
    /// any length that does not fit the 16-bit size register (i.e. above `u16::MAX`).
    // NOTE(review): the original text claimed a hardware limit of 16384 transfers,
    // while the code enforces 65535 — confirm the actual limit against the TRM.
    TooManyTransfers,
}
/// DMA transfer mode for basic channels.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub enum TransferMode {
/// Each DMA trigger will transfer a single value.
Single,
/// Each DMA trigger will transfer the complete block with one trigger.
Block,
}
/// DMA transfer options.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
#[non_exhaustive]
pub struct TransferOptions {
/// DMA transfer mode.
pub mode: TransferMode,
// TODO: Read and write stride.
}
impl Default for TransferOptions {
fn default() -> Self {
Self {
mode: TransferMode::Single,
}
}
}
/// DMA transfer.
///
/// The transfer is started when it is created. It can be awaited (driven by the
/// channel's completion interrupt) or waited for with
/// [`blocking_wait`](Self::blocking_wait). Dropping an in-flight transfer requests
/// a pause and busy-waits until the channel has stopped.
#[must_use = "futures do nothing unless you `.await` or poll them"]
pub struct Transfer<'a> {
    channel: Peri<'a, AnyChannel>,
}

impl<'a> Transfer<'a> {
    /// Software trigger source.
    ///
    /// Using this trigger source means that a transfer will start immediately rather than waiting for
    /// a hardware event. This can be useful if you want to do a DMA accelerated memcpy.
    pub const SOFTWARE_TRIGGER: u8 = 0;

    /// Create a new read DMA transfer.
    ///
    /// Reads `dst.len()` values from the fixed address `src` into `dst`.
    ///
    /// # Safety
    ///
    /// `src` must be valid for reads for the whole lifetime of the transfer.
    pub unsafe fn new_read<SW: Word, DW: Word>(
        channel: Peri<'a, impl Channel>,
        trigger_source: u8,
        src: *mut SW,
        dst: &'a mut [DW],
        options: TransferOptions,
    ) -> Result<Self, Error> {
        Self::new_read_raw(channel, trigger_source, src, dst, options)
    }

    /// Create a new read DMA transfer, using raw pointers.
    ///
    /// # Safety
    ///
    /// Both `src` and `dst` must be valid for the whole lifetime of the transfer.
    pub unsafe fn new_read_raw<SW: Word, DW: Word>(
        channel: Peri<'a, impl Channel>,
        trigger_source: u8,
        src: *mut SW,
        dst: *mut [DW],
        options: TransferOptions,
    ) -> Result<Self, Error> {
        verify_transfer::<DW>(dst)?;
        let channel = channel.into();
        channel.configure(
            trigger_source,
            src.cast(),
            SW::width(),
            dst.cast(),
            DW::width(),
            dst.len() as u16,
            false, // the source is a fixed address
            true,  // advance through the destination buffer
            options,
        );
        channel.start();
        Ok(Self { channel })
    }

    /// Create a new write DMA transfer.
    ///
    /// Writes the values in `src` to the fixed address `dst`.
    ///
    /// # Safety
    ///
    /// `dst` must be valid for writes for the whole lifetime of the transfer.
    pub unsafe fn new_write<SW: Word, DW: Word>(
        channel: Peri<'a, impl Channel>,
        trigger_source: u8,
        src: &'a [SW],
        dst: *mut DW,
        options: TransferOptions,
    ) -> Result<Self, Error> {
        Self::new_write_raw(channel, trigger_source, src, dst, options)
    }

    /// Create a new write DMA transfer, using raw pointers.
    ///
    /// # Safety
    ///
    /// Both `src` and `dst` must be valid for the whole lifetime of the transfer.
    pub unsafe fn new_write_raw<SW: Word, DW: Word>(
        channel: Peri<'a, impl Channel>,
        trigger_source: u8,
        src: *const [SW],
        dst: *mut DW,
        options: TransferOptions,
    ) -> Result<Self, Error> {
        verify_transfer::<SW>(src)?;
        let channel = channel.into();
        channel.configure(
            trigger_source,
            src.cast(),
            SW::width(),
            dst.cast(),
            DW::width(),
            src.len() as u16,
            true,  // advance through the source buffer
            false, // the destination is a fixed address
            options,
        );
        channel.start();
        Ok(Self { channel })
    }

    // TODO: Copy between slices.

    /// Request the transfer to resume.
    pub fn resume(&mut self) {
        self.channel.resume();
    }

    /// Request the transfer to pause, keeping the existing configuration for this channel.
    /// To restart the transfer, call [`resume`](Self::resume).
    ///
    /// This doesn't immediately stop the transfer, you have to wait until [`is_running`](Self::is_running) returns false.
    pub fn request_pause(&mut self) {
        self.channel.request_pause();
    }

    /// Return whether this transfer is still running.
    ///
    /// If this returns [`false`], it can be because either the transfer finished, or
    /// it was requested to stop early with [`request_pause`](Self::request_pause).
    pub fn is_running(&mut self) -> bool {
        self.channel.is_running()
    }

    /// Blocking wait until the transfer finishes.
    pub fn blocking_wait(mut self) {
        // "Subsequent reads and writes cannot be moved ahead of preceding reads."
        compiler_fence(Ordering::SeqCst);
        while self.is_running() {}
        // "Subsequent reads and writes cannot be moved ahead of preceding reads."
        compiler_fence(Ordering::SeqCst);
        // Prevent drop from being called since we ran to completion (drop will try to pause).
        mem::forget(self);
    }
}
impl<'a> Unpin for Transfer<'a> {}

impl<'a> Future for Transfer<'a> {
    type Output = ();
    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        // Register the waker before checking the channel state so a completion
        // interrupt that fires between the check and returning Pending still wakes us.
        let state: &ChannelState = &STATE[self.channel.id as usize];
        state.waker.register(cx.waker());
        // "Subsequent reads and writes cannot be moved ahead of preceding reads."
        compiler_fence(Ordering::SeqCst);
        if self.channel.is_running() {
            Poll::Pending
        } else {
            Poll::Ready(())
        }
    }
}
impl<'a> Drop for Transfer<'a> {
    fn drop(&mut self) {
        // Ask the channel to stop, then busy-wait until the hardware reports it is
        // no longer running: the transfer borrows its buffers, so the DMA must not
        // still be touching them once this guard is gone.
        self.channel.request_pause();
        while self.is_running() {}
        // "Subsequent reads and writes cannot be moved ahead of preceding reads."
        compiler_fence(Ordering::SeqCst);
    }
}
// impl details
/// Checks that a buffer is small enough to be expressed as one DMA transfer.
///
/// The count is measured in elements (not bytes) and must fit the 16-bit size
/// register.
fn verify_transfer<W: Word>(ptr: *const [W]) -> Result<(), Error> {
    if u16::try_from(ptr.len()).is_err() {
        return Err(Error::TooManyTransfers);
    }
    // TODO: Stride checks
    Ok(())
}
/// Maps the public burst-size option onto the hardware register encoding.
fn convert_burst_size(value: BurstSize) -> vals::Burstsz {
    type Hw = vals::Burstsz;
    match value {
        BurstSize::Complete => Hw::INFINITI,
        BurstSize::_8 => Hw::BURST_8,
        BurstSize::_16 => Hw::BURST_16,
        BurstSize::_32 => Hw::BURST_32,
    }
}
/// Maps the public transfer mode onto the hardware register encoding.
fn convert_mode(mode: TransferMode) -> vals::Tm {
    type Hw = vals::Tm;
    match mode {
        TransferMode::Single => Hw::SINGLE,
        TransferMode::Block => Hw::BLOCK,
    }
}
const CHANNEL_COUNT: usize = crate::_generated::DMA_CHANNELS;
static STATE: [ChannelState; CHANNEL_COUNT] = [const { ChannelState::new() }; CHANNEL_COUNT];
struct ChannelState {
waker: AtomicWaker,
}
impl ChannelState {
const fn new() -> Self {
Self {
waker: AtomicWaker::new(),
}
}
}
/// SAFETY: Must only be called once.
///
/// Changing the burst size mid transfer may have some odd behavior.
pub(crate) unsafe fn init(_cs: CriticalSection, burst_size: BurstSize, round_robin: bool) {
    // Global arbitration settings, shared by every DMA channel.
    pac::DMA.prio().modify(|prio| {
        prio.set_burstsz(convert_burst_size(burst_size));
        prio.set_roundrobin(round_robin);
    });
    // Always report data/address errors; the DMA interrupt handler panics on either.
    pac::DMA.int_event(0).imask().modify(|w| {
        w.set_dataerr(true);
        w.set_addrerr(true);
    });
    interrupt::DMA.enable();
}
pub(crate) trait SealedWord {
fn width() -> vals::Wdth;
}
/// Sealed companion of [`Channel`]: register access and transfer control, keyed by
/// the hardware channel id.
pub(crate) trait SealedChannel {
    /// Hardware channel number (index into the DMA register blocks and `STATE`).
    fn id(&self) -> u8;
    /// Trigger control register for this channel.
    #[inline]
    fn tctl(&self) -> Reg<regs::Tctl, RW> {
        pac::DMA.trig(self.id() as usize).tctl()
    }
    /// Channel control register for this channel.
    #[inline]
    fn ctl(&self) -> Reg<regs::Ctl, RW> {
        pac::DMA.chan(self.id() as usize).ctl()
    }
    /// Source address register for this channel.
    #[inline]
    fn sa(&self) -> Reg<u32, RW> {
        pac::DMA.chan(self.id() as usize).sa()
    }
    /// Destination address register for this channel.
    #[inline]
    fn da(&self) -> Reg<u32, RW> {
        pac::DMA.chan(self.id() as usize).da()
    }
    /// Transfer size register for this channel.
    #[inline]
    fn sz(&self) -> Reg<regs::Sz, RW> {
        pac::DMA.chan(self.id() as usize).sz()
    }
    /// Masks (`enable = false`) or unmasks (`enable = true`) this channel's interrupt.
    #[inline]
    fn mask_interrupt(&self, enable: bool) {
        // Enabling interrupts is an RMW operation.
        critical_section::with(|_cs| {
            pac::DMA.int_event(0).imask().modify(|w| {
                w.set_ch(self.id() as usize, enable);
            });
        })
    }
    /// Programs the channel registers for a transfer and enables the channel.
    /// The transfer still needs a trigger; see [`start`](Self::start).
    ///
    /// # Safety
    ///
    /// - `src` must be valid for the lifetime of the transfer.
    /// - `dst` must be valid for the lifetime of the transfer.
    unsafe fn configure(
        &self,
        trigger_sel: u8,
        src: *const u32,
        src_wdth: Wdth,
        dst: *const u32,
        dst_wdth: Wdth,
        transfer_count: u16,
        increment_src: bool,
        increment_dst: bool,
        options: TransferOptions,
    ) {
        // "Subsequent reads and writes cannot be moved ahead of preceding reads."
        compiler_fence(Ordering::SeqCst);
        self.ctl().modify(|w| {
            // SLAU 5.2.5:
            // "The DMATSEL bits should be modified only when the DMACTLx.DMAEN bit is
            // 0; otherwise, unpredictable DMA triggers can occur."
            //
            // We also want to stop any transfers before setup.
            w.set_en(false);
            w.set_req(false);
            // Not every part supports auto enable, so force its value to 0.
            w.set_autoen(Autoen::NONE);
            w.set_preirq(Preirq::PREIRQ_DISABLE);
            w.set_srcwdth(src_wdth);
            w.set_dstwdth(dst_wdth);
            w.set_srcincr(if increment_src {
                Incr::INCREMENT
            } else {
                Incr::UNCHANGED
            });
            w.set_dstincr(if increment_dst {
                Incr::INCREMENT
            } else {
                Incr::UNCHANGED
            });
            w.set_em(Em::NORMAL);
            // Single and block will clear the enable bit when the transfers finish.
            w.set_tm(convert_mode(options.mode));
        });
        self.tctl().write(|w| {
            w.set_tsel(trigger_sel);
            // Basic channels do not implement cross triggering.
            w.set_tint(vals::Tint::EXTERNAL);
        });
        self.sz().write(|w| {
            w.set_size(transfer_count);
        });
        self.sa().write_value(src as u32);
        self.da().write_value(dst as u32);
        // Enable the channel.
        self.ctl().modify(|w| {
            // FIXME: Why did putting set_req later fix some transfers
            w.set_en(true);
            w.set_req(true);
        });
    }
    /// Unmasks the channel interrupt and issues a transfer request.
    fn start(&self) {
        self.mask_interrupt(true);
        // "Subsequent reads and writes cannot be moved ahead of preceding reads."
        compiler_fence(Ordering::SeqCst);
        // Request the DMA transfer to start.
        self.ctl().modify(|w| {
            w.set_req(true);
        });
    }
    /// Re-issues a transfer request on an already-configured channel.
    fn resume(&self) {
        self.mask_interrupt(true);
        // "Subsequent reads and writes cannot be moved ahead of preceding reads."
        compiler_fence(Ordering::SeqCst);
        self.ctl().modify(|w| {
            // w.set_en(true);
            w.set_req(true);
        });
    }
    /// Withdraws the transfer request; the transfer stops asynchronously. Poll
    /// [`is_running`](Self::is_running) to observe the stop.
    fn request_pause(&self) {
        // "Subsequent reads and writes cannot be moved ahead of preceding reads."
        compiler_fence(Ordering::SeqCst);
        // Stop the transfer.
        //
        // SLAU846 5.2.6:
        // "A DMA block transfer in progress can be stopped by clearing the DMAEN bit"
        self.ctl().modify(|w| {
            // w.set_en(false);
            w.set_req(false);
        });
    }
    /// Whether the channel is both enabled and has a pending transfer request.
    fn is_running(&self) -> bool {
        // "Subsequent reads and writes cannot be moved ahead of preceding reads."
        compiler_fence(Ordering::SeqCst);
        let ctl = self.ctl().read();
        // Is the transfer requested?
        ctl.req()
            // Is the channel enabled?
            && ctl.en()
    }
}
macro_rules! impl_dma_channel {
($instance: ident, $num: expr) => {
impl crate::dma::SealedChannel for crate::peripherals::$instance {
fn id(&self) -> u8 {
$num
}
}
impl From<crate::peripherals::$instance> for crate::dma::AnyChannel {
fn from(value: crate::peripherals::$instance) -> Self {
use crate::dma::SealedChannel;
Self { id: value.id() }
}
}
impl crate::dma::Channel for crate::peripherals::$instance {}
};
}
// C1104 has no full DMA channels.
#[allow(unused_macros)]
macro_rules! impl_full_dma_channel {
($instance: ident, $num: expr) => {
impl_dma_channel!($instance, $num);
impl From<crate::peripherals::$instance> for crate::dma::AnyFullChannel {
fn from(value: crate::peripherals::$instance) -> Self {
use crate::dma::SealedChannel;
Self { id: value.id() }
}
}
impl crate::dma::FullChannel for crate::peripherals::$instance {}
};
}
// Shared DMA interrupt: wakes the future of every channel whose completion event
// fired, and panics on bus errors.
#[cfg(feature = "rt")]
#[interrupt]
fn DMA() {
    use crate::BitIter;
    let events = pac::DMA.int_event(0);
    let mis = events.mis().read();
    // TODO: Handle DATAERR and ADDRERR? However we do not know which channel causes an error.
    if mis.dataerr() {
        panic!("DMA data error");
    } else if mis.addrerr() {
        panic!("DMA address error")
    }
    // Bits 0..16 are the per-channel completion events; ignore preirq interrupts
    // (bits 16 and up).
    for i in BitIter(mis.0 & 0x0000_FFFF) {
        if let Some(state) = STATE.get(i as usize) {
            // Wake the future; it detects completion by reading the channel registers.
            state.waker.wake();
            // Mask the channel interrupt until the next transfer unmasks it again,
            // so a stale event cannot re-fire before the future is polled.
            events.imask().modify(|w| {
                w.set_ch(i as usize, false);
            });
        }
    }
}

View File

@ -1090,7 +1090,9 @@ pub(crate) fn init(gpio: gpio::Gpio) {
#[cfg(feature = "rt")]
fn irq_handler(gpio: gpio::Gpio, wakers: &[AtomicWaker; 32]) {
use crate::BitIter;
// Only consider pins which have interrupts unmasked.
let bits = gpio.cpu_int().mis().read().0;
for i in BitIter(bits) {
@ -1103,22 +1105,6 @@ fn irq_handler(gpio: gpio::Gpio, wakers: &[AtomicWaker; 32]) {
}
}
struct BitIter(u32);
impl Iterator for BitIter {
type Item = u32;
fn next(&mut self) -> Option<Self::Item> {
match self.0.trailing_zeros() {
32 => None,
b => {
self.0 &= !(1 << b);
Some(b)
}
}
}
}
// C110x and L110x have a dedicated interrupts just for GPIOA.
//
// These chips do not have a GROUP1 interrupt.

View File

@ -13,6 +13,7 @@ pub(crate) mod fmt;
// This must be declared early as well for
mod macros;
pub mod dma;
pub mod gpio;
pub mod timer;
pub mod uart;
@ -59,22 +60,106 @@ pub(crate) use mspm0_metapac as pac;
pub use crate::_generated::interrupt;
/// Macro to bind interrupts to handlers.
///
/// This defines the right interrupt handlers, and creates a unit struct (like `struct Irqs;`)
/// and implements the right [`Binding`]s for it. You can pass this struct to drivers to
/// prove at compile-time that the right interrupts have been bound.
///
/// Example of how to bind one interrupt:
///
/// ```rust,ignore
/// use embassy_mspm0::{bind_interrupts, uart, peripherals};
///
/// bind_interrupts!(
///     /// Binds the UART0 interrupt.
///     struct Irqs {
///         UART0 => uart::InterruptHandler<peripherals::UART0>;
///     }
/// );
/// ```
///
/// Example of how to bind multiple interrupts in a single macro invocation:
///
/// ```rust,ignore
/// use embassy_mspm0::{bind_interrupts, uart, peripherals};
///
/// bind_interrupts!(struct Irqs {
///     UART0 => uart::InterruptHandler<peripherals::UART0>;
///     UART1 => uart::InterruptHandler<peripherals::UART1>;
/// });
/// ```
// developer note: this macro can't be in `embassy-hal-internal` due to the use of `$crate`.
#[macro_export]
macro_rules! bind_interrupts {
    ($(#[$attr:meta])* $vis:vis struct $name:ident {
        $(
            $(#[cfg($cond_irq:meta)])?
            $irq:ident => $(
                $(#[cfg($cond_handler:meta)])?
                $handler:ty
            ),*;
        )*
    }) => {
        #[derive(Copy, Clone)]
        $(#[$attr])*
        $vis struct $name;
        $(
            #[allow(non_snake_case)]
            #[no_mangle]
            $(#[cfg($cond_irq)])?
            unsafe extern "C" fn $irq() {
                $(
                    $(#[cfg($cond_handler)])?
                    <$handler as $crate::interrupt::typelevel::Handler<$crate::interrupt::typelevel::$irq>>::on_interrupt();
                )*
            }
            $(#[cfg($cond_irq)])?
            $crate::bind_interrupts!(@inner
                $(
                    $(#[cfg($cond_handler)])?
                    unsafe impl $crate::interrupt::typelevel::Binding<$crate::interrupt::typelevel::$irq, $handler> for $name {}
                )*
            );
        )*
    };
    (@inner $($t:tt)*) => {
        $($t)*
    }
}
/// `embassy-mspm0` global configuration.
#[non_exhaustive]
#[derive(Clone, Copy)]
pub struct Config {
    // TODO: OSC configuration.
    /// The size of DMA block transfer burst.
    ///
    /// If this is set to [`dma::BurstSize::Complete`], each block transfer runs to
    /// completion without interruption; the other values interrupt the block after
    /// that many transfers so channel priority can be reevaluated.
    pub dma_burst_size: dma::BurstSize,
    /// Whether the DMA channels are used in a fixed priority or a round robin fashion.
    ///
    /// If [`false`], the DMA priorities are fixed.
    ///
    /// If [`true`], after a channel finishes a transfer it becomes the lowest priority.
    pub dma_round_robin: bool,
}
impl Default for Config {
    fn default() -> Self {
        Self {
            // Run block transfers to completion with fixed channel priorities.
            dma_burst_size: dma::BurstSize::Complete,
            dma_round_robin: false,
        }
    }
}
pub fn init(_config: Config) -> Peripherals {
pub fn init(config: Config) -> Peripherals {
critical_section::with(|cs| {
let peripherals = Peripherals::take_with_cs(cs);
@ -112,9 +197,33 @@ pub fn init(_config: Config) -> Peripherals {
crate::interrupt::typelevel::GPIOA::enable();
}
// SAFETY: Peripherals::take_with_cs will only be run once or panic.
unsafe { dma::init(cs, config.dma_burst_size, config.dma_round_robin) };
#[cfg(feature = "_time-driver")]
time_driver::init(cs);
peripherals
})
}
pub(crate) mod sealed {
#[allow(dead_code)]
pub trait Sealed {}
}
/// Iterates over the indices of the set bits in a `u32`, lowest index first.
struct BitIter(u32);

impl Iterator for BitIter {
    type Item = u32;

    fn next(&mut self) -> Option<Self::Item> {
        if self.0 == 0 {
            return None;
        }
        let bit = self.0.trailing_zeros();
        // Clear the lowest set bit so the next call yields the next index.
        self.0 &= self.0 - 1;
        Some(bit)
    }
}

View File

@ -6,4 +6,4 @@ runner = "probe-rs run --chip MSPM0G3507 --protocol=swd"
target = "thumbv6m-none-eabi"
[env]
DEFMT_LOG = "debug"
DEFMT_LOG = "trace"

View File

@ -6,6 +6,7 @@ license = "MIT OR Apache-2.0"
[features]
mspm0g3507 = [ "embassy-mspm0/mspm0g3507pm" ]
mspm0g3519 = [ "embassy-mspm0/mspm0g3519pz" ]
[dependencies]
teleprobe-meta = "1.1"

View File

@ -8,6 +8,9 @@ fn main() -> Result<(), Box<dyn Error>> {
#[cfg(feature = "mspm0g3507")]
let memory_x = include_bytes!("memory_g3507.x");
#[cfg(feature = "mspm0g3519")]
let memory_x = include_bytes!("memory_g3519.x");
fs::write(out.join("memory.x"), memory_x).unwrap();
println!("cargo:rustc-link-search={}", out.display());

View File

@ -0,0 +1,6 @@
MEMORY
{
FLASH : ORIGIN = 0x00000000, LENGTH = 128K
/* Select non-parity range of SRAM due to SRAM_ERR_01 errata in SLAZ758 */
RAM : ORIGIN = 0x20200000, LENGTH = 64K
}

503
tests/mspm0/src/bin/dma.rs Normal file
View File

@ -0,0 +1,503 @@
#![no_std]
#![no_main]
#[cfg(feature = "mspm0g3507")]
teleprobe_meta::target!(b"lp-mspm0g3507");
#[cfg(feature = "mspm0g3519")]
teleprobe_meta::target!(b"lp-mspm0g3519");
use core::slice;
use defmt::{assert, assert_eq, *};
use embassy_executor::Spawner;
use embassy_mspm0::dma::{Channel, Transfer, TransferMode, TransferOptions, Word};
use embassy_mspm0::Peri;
use {defmt_rtt as _, panic_probe as _};
#[embassy_executor::main]
async fn main(_spawner: Spawner) {
let mut p = embassy_mspm0::init(Default::default());
info!("Hello World!");
{
info!("Single u8 read (blocking)");
single_read(p.DMA_CH0.reborrow(), 0x41_u8);
info!("Single u16 read (blocking)");
single_read(p.DMA_CH0.reborrow(), 0xFF41_u16);
info!("Single u32 read (blocking)");
single_read(p.DMA_CH0.reborrow(), 0xFFEE_FF41_u32);
info!("Single u64 read (blocking)");
single_read(p.DMA_CH0.reborrow(), 0x0011_2233_FFEE_FF41_u64);
}
// Widening transfers
{
info!("Single u8 read to u16");
widening_single_read::<u8, u16>(p.DMA_CH0.reborrow(), 0x41);
info!("Single u8 read to u32");
widening_single_read::<u8, u32>(p.DMA_CH0.reborrow(), 0x43);
info!("Single u8 read to u64");
widening_single_read::<u8, u64>(p.DMA_CH0.reborrow(), 0x47);
info!("Single u16 read to u32");
widening_single_read::<u16, u32>(p.DMA_CH0.reborrow(), 0xAE43);
info!("Single u16 read to u64");
widening_single_read::<u16, u64>(p.DMA_CH0.reborrow(), 0xAF47);
info!("Single u32 read to u64");
widening_single_read::<u32, u64>(p.DMA_CH0.reborrow(), 0xDEAD_AF47);
}
// Narrowing transfers.
{
info!("Single u16 read to u8");
narrowing_single_read::<u16, u8>(p.DMA_CH0.reborrow(), 0x4142);
info!("Single u32 read to u8");
narrowing_single_read::<u32, u8>(p.DMA_CH0.reborrow(), 0x4142_2414);
info!("Single u64 read to u8");
narrowing_single_read::<u64, u8>(p.DMA_CH0.reborrow(), 0x4142_2414_5153_7776);
info!("Single u32 read to u16");
narrowing_single_read::<u32, u16>(p.DMA_CH0.reborrow(), 0x4142_2414);
info!("Single u64 read to u16");
narrowing_single_read::<u64, u16>(p.DMA_CH0.reborrow(), 0x4142_2414_5153_7776);
info!("Single u64 read to u32");
narrowing_single_read::<u64, u32>(p.DMA_CH0.reborrow(), 0x4142_2414_5153_7776);
}
{
info!("Single u8 read (async)");
async_single_read(p.DMA_CH0.reborrow(), 0x42_u8).await;
info!("Single u16 read (async)");
async_single_read(p.DMA_CH0.reborrow(), 0xAE42_u16).await;
info!("Single u32 read (async)");
async_single_read(p.DMA_CH0.reborrow(), 0xFE44_1500_u32).await;
info!("Single u64 read (async)");
async_single_read(p.DMA_CH0.reborrow(), 0x8F7F_6F5F_4F3F_2F1F_u64).await;
}
{
info!("Multiple u8 reads (blocking)");
block_read::<_, 16>(p.DMA_CH0.reborrow(), 0x98_u8);
info!("Multiple u16 reads (blocking)");
block_read::<_, 2>(p.DMA_CH0.reborrow(), 0x9801_u16);
info!("Multiple u32 reads (blocking)");
block_read::<_, 4>(p.DMA_CH0.reborrow(), 0x9821_9801_u32);
info!("Multiple u64 reads (blocking)");
block_read::<_, 4>(p.DMA_CH0.reborrow(), 0xABCD_EF01_2345_6789_u64);
}
{
info!("Multiple u8 reads (async)");
async_block_read::<_, 8>(p.DMA_CH0.reborrow(), 0x86_u8).await;
info!("Multiple u16 reads (async)");
async_block_read::<_, 6>(p.DMA_CH0.reborrow(), 0x7777_u16).await;
info!("Multiple u32 reads (async)");
async_block_read::<_, 3>(p.DMA_CH0.reborrow(), 0xA5A5_A5A5_u32).await;
info!("Multiple u64 reads (async)");
async_block_read::<_, 14>(p.DMA_CH0.reborrow(), 0x5A5A_5A5A_A5A5_A5A5_u64).await;
}
// Intentionally skip testing multiple reads in single transfer mode.
//
// If the destination length is greater than 1 and single transfer mode is used then two transfers
// are performed in a trigger. Similarly with any other length of destination above 2, only 2 transfers
// are performed. Issuing another trigger (resume) results in no further progress. More than likely
// the test does not work due to some combination of a hardware bug and the datasheet being unclear
// regarding what ends a software trigger.
//
// However this case works fine with a hardware trigger (such as the ADC hardware trigger).
{
info!("Single u8 write (blocking)");
single_write(p.DMA_CH0.reborrow(), 0x41_u8);
info!("Single u16 write (blocking)");
single_write(p.DMA_CH0.reborrow(), 0x4142_u16);
info!("Single u32 write (blocking)");
single_write(p.DMA_CH0.reborrow(), 0x4142_4344_u32);
info!("Single u64 write (blocking)");
single_write(p.DMA_CH0.reborrow(), 0x4142_4344_4546_4748_u64);
}
{
info!("Single u8 write (async)");
async_single_write(p.DMA_CH0.reborrow(), 0xAA_u8).await;
info!("Single u16 write (async)");
async_single_write(p.DMA_CH0.reborrow(), 0xBBBB_u16).await;
info!("Single u32 write (async)");
async_single_write(p.DMA_CH0.reborrow(), 0xCCCC_CCCC_u32).await;
info!("Single u64 write (async)");
async_single_write(p.DMA_CH0.reborrow(), 0xDDDD_DDDD_DDDD_DDDD_u64).await;
}
{
info!("Multiple u8 writes (blocking)");
block_write(p.DMA_CH0.reborrow(), &[0xFF_u8, 0x7F, 0x3F, 0x1F]);
info!("Multiple u16 writes (blocking)");
block_write(p.DMA_CH0.reborrow(), &[0xFFFF_u16, 0xFF7F, 0xFF3F, 0xFF1F]);
info!("Multiple u32 writes (blocking)");
block_write(
p.DMA_CH0.reborrow(),
&[0xFF00_00FF_u32, 0xFF00_007F, 0x0000_FF3F, 0xFF1F_0000],
);
info!("Multiple u64 writes (blocking)");
block_write(
p.DMA_CH0.reborrow(),
&[
0xFF00_0000_0000_00FF_u64,
0x0000_FF00_007F_0000,
0x0000_FF3F_0000_0000,
0xFF1F_0000_1111_837A,
],
);
}
{
info!("Multiple u8 writes (async)");
async_block_write(p.DMA_CH0.reborrow(), &[0u8, 1, 2, 3]).await;
info!("Multiple u16 writes (async)");
async_block_write(p.DMA_CH0.reborrow(), &[0x9801u16, 0x9802, 0x9803, 0x9800, 0x9000]).await;
info!("Multiple u32 writes (async)");
async_block_write(p.DMA_CH0.reborrow(), &[0x9801_ABCDu32, 0xFFAC_9802, 0xDEAD_9803]).await;
info!("Multiple u64 writes (async)");
async_block_write(
p.DMA_CH0.reborrow(),
&[
0xA55A_1111_3333_5555_u64,
0x1111_A55A_3333_5555,
0x5555_A55A_3333_1111,
0x01234_5678_89AB_CDEF,
],
)
.await;
}
// TODO: Mixed byte and word transfers.
info!("Test OK");
cortex_m::asm::bkpt();
}
/// Copies a single `W` value from `src` into a local destination over DMA
/// (software trigger, blocking completion) and asserts the value arrived intact.
fn single_read<W: Word + Copy + Default + Eq + defmt::Format>(mut channel: Peri<'_, impl Channel>, mut src: W) {
    let options = TransferOptions::default();
    let mut dst = W::default();
    // SAFETY: src and dst outlive the transfer.
    let transfer = unsafe {
        unwrap!(Transfer::new_read(
            channel.reborrow(),
            Transfer::SOFTWARE_TRIGGER,
            &mut src,
            slice::from_mut(&mut dst),
            options,
        ))
    };
    transfer.blocking_wait();
    assert_eq!(src, dst);
}
async fn async_single_read<W: Word + Copy + Default + Eq + defmt::Format>(
mut channel: Peri<'_, impl Channel>,
mut src: W,
) {
let options = TransferOptions::default();
let mut dst = W::default();
// SAFETY: src and dst outlive the transfer.
let transfer = unsafe {
unwrap!(Transfer::new_read(
channel.reborrow(),
Transfer::SOFTWARE_TRIGGER,
&mut src,
slice::from_mut(&mut dst),
options,
))
};
transfer.await;
assert_eq!(src, dst);
}
fn block_read<W: Word + Copy + Default + Eq + defmt::Format, const N: usize>(
mut channel: Peri<'_, impl Channel>,
mut src: W,
) {
let mut options = TransferOptions::default();
// Complete the entire transfer.
options.mode = TransferMode::Block;
let mut dst = [W::default(); N];
// SAFETY: src and dst outlive the transfer.
let transfer = unsafe {
unwrap!(Transfer::new_read(
channel.reborrow(),
Transfer::SOFTWARE_TRIGGER,
&mut src,
&mut dst[..],
options,
))
};
transfer.blocking_wait();
assert_eq!(dst, [src; N]);
}
async fn async_block_read<W: Word + Copy + Default + Eq + defmt::Format, const N: usize>(
mut channel: Peri<'_, impl Channel>,
mut src: W,
) {
let mut options = TransferOptions::default();
// Complete the entire transfer.
options.mode = TransferMode::Block;
let mut dst = [W::default(); N];
// SAFETY: src and dst outlive the transfer.
let transfer = unsafe {
unwrap!(Transfer::new_read(
channel.reborrow(),
Transfer::SOFTWARE_TRIGGER,
&mut src,
&mut dst[..],
options,
))
};
transfer.await;
assert_eq!(dst, [src; N]);
}
fn single_write<W: Word + Default + Eq + defmt::Format>(mut channel: Peri<'_, impl Channel>, src: W) {
let options = TransferOptions::default();
let mut dst = W::default();
// SAFETY: src and dst outlive the transfer.
let transfer = unsafe {
unwrap!(Transfer::new_write(
channel.reborrow(),
Transfer::SOFTWARE_TRIGGER,
slice::from_ref(&src),
&mut dst,
options,
))
};
transfer.blocking_wait();
assert_eq!(src, dst);
}
async fn async_single_write<W: Word + Default + Eq + defmt::Format>(mut channel: Peri<'_, impl Channel>, src: W) {
let options = TransferOptions::default();
let mut dst = W::default();
// SAFETY: src and dst outlive the transfer.
let transfer = unsafe {
unwrap!(Transfer::new_write(
channel.reborrow(),
Transfer::SOFTWARE_TRIGGER,
slice::from_ref(&src),
&mut dst,
options,
))
};
transfer.await;
assert_eq!(src, dst);
}
fn block_write<W: Word + Default + Eq + defmt::Format>(mut channel: Peri<'_, impl Channel>, src: &[W]) {
let mut options = TransferOptions::default();
// Complete the entire transfer.
options.mode = TransferMode::Block;
let mut dst = W::default();
// Starting from 1 because a zero length transfer does nothing.
for i in 1..src.len() {
info!("-> {} write(s)", i);
// SAFETY: src and dst outlive the transfer.
let transfer = unsafe {
unwrap!(Transfer::new_write(
channel.reborrow(),
Transfer::SOFTWARE_TRIGGER,
&src[..i],
&mut dst,
options,
))
};
transfer.blocking_wait();
// The result will be the last value written.
assert_eq!(dst, src[i - 1]);
}
}
async fn async_block_write<W: Word + Default + Eq + defmt::Format>(mut channel: Peri<'_, impl Channel>, src: &[W]) {
let mut options = TransferOptions::default();
// Complete the entire transfer.
options.mode = TransferMode::Block;
let mut dst = W::default();
// Starting from 1 because a zero length transfer does nothing.
for i in 1..src.len() {
info!("-> {} write(s)", i);
// SAFETY: src and dst outlive the transfer.
let transfer = unsafe {
unwrap!(Transfer::new_write(
channel.reborrow(),
Transfer::SOFTWARE_TRIGGER,
&src[..i],
&mut dst,
options,
))
};
transfer.await;
// The result will be the last value written.
assert_eq!(dst, src[i - 1]);
}
}
/// [`single_read`], but testing when the destination is wider than the source.
///
/// The MSPM0 DMA states that the upper bytes when the destination is longer than the source are zeroed.
/// This matches the behavior in Rust for all unsigned integer types.
fn widening_single_read<SW, DW>(mut channel: Peri<'_, impl Channel>, mut src: SW)
where
SW: Word + Copy + Default + Eq + defmt::Format,
DW: Word + Copy + Default + Eq + defmt::Format + From<SW>,
{
assert!(
DW::size() > SW::size(),
"This test only works when the destination is larger than the source"
);
let options = TransferOptions::default();
let mut dst = DW::default();
// SAFETY: src and dst outlive the transfer.
let transfer = unsafe {
unwrap!(Transfer::new_read(
channel.reborrow(),
Transfer::SOFTWARE_TRIGGER,
&mut src,
slice::from_mut(&mut dst),
options,
))
};
transfer.blocking_wait();
assert_eq!(DW::from(src), dst);
}
/// [`single_read`], but testing when the destination is narrower than the source.
///
/// The MSPM0 DMA states that the upper bytes when the source is longer than the destination are dropped.
/// This matches the behavior in Rust for all unsigned integer types.
fn narrowing_single_read<SW, DW>(mut channel: Peri<'_, impl Channel>, mut src: SW)
where
SW: Word + Copy + Default + Eq + defmt::Format + From<DW>,
DW: Word + Copy + Default + Eq + defmt::Format + Narrow<SW>,
{
assert!(
SW::size() > DW::size(),
"This test only works when the source is larger than the destination"
);
let options = TransferOptions::default();
let mut dst = DW::default();
// SAFETY: src and dst outlive the transfer.
let transfer = unsafe {
unwrap!(Transfer::new_read(
channel.reborrow(),
Transfer::SOFTWARE_TRIGGER,
&mut src,
slice::from_mut(&mut dst),
options,
))
};
transfer.blocking_wait();
// The expected value is the source value masked by the maximum destination value.
// This is effectively `src as DW as SW` to drop the upper byte(s).
let expect = SW::from(DW::narrow(src));
assert_eq!(expect, dst.into());
}
/// A pseudo `as` trait to allow downcasting integer types (TryFrom could fail).
trait Narrow<T> {
    fn narrow(value: T) -> Self;
}

/// Generates a `Narrow` impl that truncates `$from` to `$to` with `as`.
macro_rules! impl_narrow {
    ($($from:ty => $to:ty),* $(,)?) => {
        $(
            impl Narrow<$from> for $to {
                fn narrow(value: $from) -> Self {
                    value as $to
                }
            }
        )*
    };
}

impl_narrow!(
    u16 => u8,
    u32 => u8,
    u64 => u8,
    u32 => u16,
    u64 => u16,
    u64 => u32,
);

View File

@ -4,6 +4,9 @@
#[cfg(feature = "mspm0g3507")]
teleprobe_meta::target!(b"lp-mspm0g3507");
#[cfg(feature = "mspm0g3519")]
teleprobe_meta::target!(b"lp-mspm0g3519");
use defmt::{assert_eq, unwrap, *};
use embassy_executor::Spawner;
use embassy_mspm0::mode::Blocking;
@ -23,7 +26,7 @@ async fn main(_spawner: Spawner) {
// TODO: Allow creating a looped-back UART (so pins are not needed).
// Do not select default UART since the virtual COM port is attached to UART0.
#[cfg(feature = "mspm0g3507")]
#[cfg(any(feature = "mspm0g3507", feature = "mspm0g3519"))]
let (mut tx, mut rx, mut uart) = (p.PA8, p.PA9, p.UART1);
const MFCLK_BUAD_RATES: &[u32] = &[1200, 2400, 4800, 9600, 19200, 38400, 57600, 115200];