Simplify dma usage (#1238)

* Simplify SPI, I2S, AES

* Simplify PARL_IO and I8080

* Adapt docs

* Don't require `&mut` for tx-buffer

* Annotate DMA transfer structs as `#[must_use]`

* CHANGELOG.md entry
This commit is contained in:
Björn Quentin 2024-03-13 11:14:56 +01:00 committed by GitHub
parent cc9ccb3f83
commit c283563542
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
19 changed files with 317 additions and 619 deletions

View File

@ -13,6 +13,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
- Fixing `esp-wifi` + `TRNG` issue on `ESP32-S2` (#1272)
### Changed
- Prefer mutable references over moving for DMA transactions (#1238)
### Removed

View File

@ -242,8 +242,6 @@ pub enum Endianness {
#[cfg(any(esp32c3, esp32c6, esp32h2, esp32s3))]
pub mod dma {
use core::mem;
use embedded_dma::{ReadBuffer, WriteBuffer};
use crate::{
@ -303,28 +301,22 @@ pub mod dma {
}
/// An in-progress DMA transfer
pub struct AesDmaTransferRxTx<'d, C, RBUFFER, TBUFFER>
#[must_use]
pub struct AesDmaTransferRxTx<'t, 'd, C>
where
C: ChannelTypes,
C::P: AesPeripheral,
{
aes_dma: AesDma<'d, C>,
rbuffer: RBUFFER,
tbuffer: TBUFFER,
aes_dma: &'t mut AesDma<'d, C>,
}
impl<'d, C, RXBUF, TXBUF> DmaTransferRxTx<RXBUF, TXBUF, AesDma<'d, C>>
for AesDmaTransferRxTx<'d, C, RXBUF, TXBUF>
impl<'t, 'd, C> DmaTransferRxTx for AesDmaTransferRxTx<'t, 'd, C>
where
C: ChannelTypes,
C::P: AesPeripheral,
{
/// Wait for the DMA transfer to complete and return the buffers and the
/// AES instance.
fn wait(
self,
) -> Result<(RXBUF, TXBUF, AesDma<'d, C>), (DmaError, RXBUF, TXBUF, AesDma<'d, C>)>
{
/// Wait for the DMA transfer to complete
fn wait(self) -> Result<(), DmaError> {
// Waiting for the DMA transfer is not enough. We need to wait for the
// peripheral to finish flushing its buffers, too.
while self.aes_dma.aes.aes.state().read().state().bits() != 2 // DMA status DONE == 2
@ -335,25 +327,10 @@ pub mod dma {
self.aes_dma.finish_transform();
let err = self.aes_dma.channel.rx.has_error() || self.aes_dma.channel.tx.has_error();
// `DmaTransferRxTx` needs to have a `Drop` implementation, because we accept
// managed buffers that can free their memory on drop. Because of that
// we can't move out of the `DmaTransferRxTx`'s fields, so we use `ptr::read`
// and `mem::forget`.
//
// NOTE(unsafe) There is no panic branch between getting the resources
// and forgetting `self`.
unsafe {
let rbuffer = core::ptr::read(&self.rbuffer);
let tbuffer = core::ptr::read(&self.tbuffer);
let payload = core::ptr::read(&self.aes_dma);
mem::forget(self);
if err {
Err((DmaError::DescriptorError, rbuffer, tbuffer, payload))
} else {
Ok((rbuffer, tbuffer, payload))
}
if self.aes_dma.channel.rx.has_error() || self.aes_dma.channel.tx.has_error() {
Err(DmaError::DescriptorError)
} else {
Ok(())
}
}
@ -364,7 +341,7 @@ pub mod dma {
}
}
impl<'d, C, RXBUF, TXBUF> Drop for AesDmaTransferRxTx<'d, C, RXBUF, TXBUF>
impl<'t, 'd, C> Drop for AesDmaTransferRxTx<'t, 'd, C>
where
C: ChannelTypes,
C::P: AesPeripheral,
@ -409,14 +386,14 @@ pub mod dma {
/// This will return a [AesDmaTransferRxTx] owning the buffer(s) and the
/// AES instance. The maximum amount of data to be sent/received
/// is 32736 bytes.
pub fn process<TXBUF, RXBUF>(
mut self,
words: TXBUF,
mut read_buffer: RXBUF,
pub fn process<'t, TXBUF, RXBUF>(
&'t mut self,
words: &'t TXBUF,
read_buffer: &'t mut RXBUF,
mode: Mode,
cipher_mode: CipherMode,
key: [u8; 16],
) -> Result<AesDmaTransferRxTx<'d, C, RXBUF, TXBUF>, crate::dma::DmaError>
) -> Result<AesDmaTransferRxTx<'t, 'd, C>, crate::dma::DmaError>
where
TXBUF: ReadBuffer<Word = u8>,
RXBUF: WriteBuffer<Word = u8>,
@ -434,11 +411,7 @@ pub mod dma {
key,
)?;
Ok(AesDmaTransferRxTx {
aes_dma: self,
rbuffer: read_buffer,
tbuffer: words,
})
Ok(AesDmaTransferRxTx { aes_dma: self })
}
#[allow(clippy::too_many_arguments)]

View File

@ -1298,18 +1298,18 @@ where
/// Trait to be implemented for an in progress dma transfer.
#[allow(drop_bounds)]
pub trait DmaTransfer<B, T>: Drop {
pub trait DmaTransfer: Drop {
/// Wait for the transfer to finish.
fn wait(self) -> Result<(B, T), (DmaError, B, T)>;
fn wait(self) -> Result<(), DmaError>;
/// Check if the transfer is finished.
fn is_done(&self) -> bool;
}
/// Trait to be implemented for an in progress dma transfer.
#[allow(clippy::type_complexity, drop_bounds)]
pub trait DmaTransferRxTx<BR, BT, T>: Drop {
pub trait DmaTransferRxTx: Drop {
/// Wait for the transfer to finish.
fn wait(self) -> Result<(BR, BT, T), (DmaError, BR, BT, T)>;
fn wait(self) -> Result<(), DmaError>;
/// Check if the transfer is finished.
fn is_done(&self) -> bool;
}

View File

@ -198,16 +198,16 @@ impl DataFormat {
}
/// An in-progress DMA write transfer.
pub struct I2sWriteDmaTransfer<'d, T, CH, BUFFER>
#[must_use]
pub struct I2sWriteDmaTransfer<'t, 'd, T, CH>
where
T: RegisterAccess,
CH: ChannelTypes,
{
i2s_tx: I2sTx<'d, T, CH>,
buffer: BUFFER,
i2s_tx: &'t mut I2sTx<'d, T, CH>,
}
impl<'d, T, CH, BUFFER> I2sWriteDmaTransfer<'d, T, CH, BUFFER>
impl<'t, 'd, T, CH> I2sWriteDmaTransfer<'t, 'd, T, CH>
where
T: RegisterAccess,
CH: ChannelTypes,
@ -235,61 +235,32 @@ where
/// Stop for the DMA transfer and return the buffer and the
/// I2sTx instance.
#[allow(clippy::type_complexity)]
pub fn stop(self) -> Result<(BUFFER, I2sTx<'d, T, CH>), (DmaError, BUFFER, I2sTx<'d, T, CH>)> {
pub fn stop(self) -> Result<(), DmaError> {
T::tx_stop();
let err = self.i2s_tx.tx_channel.has_error();
// `DmaTransfer` needs to have a `Drop` implementation, because we accept
// managed buffers that can free their memory on drop. Because of that
// we can't move out of the `DmaTransfer`'s fields, so we use `ptr::read`
// and `mem::forget`.
//
// NOTE(unsafe) There is no panic branch between getting the resources
// and forgetting `self`.
unsafe {
let buffer = core::ptr::read(&self.buffer);
let payload = core::ptr::read(&self.i2s_tx);
core::mem::forget(self);
if err {
Err((DmaError::DescriptorError, buffer, payload))
} else {
Ok((buffer, payload))
}
if self.i2s_tx.tx_channel.has_error() {
Err(DmaError::DescriptorError)
} else {
Ok(())
}
}
}
impl<'d, T, CH, BUFFER> DmaTransfer<BUFFER, I2sTx<'d, T, CH>>
for I2sWriteDmaTransfer<'d, T, CH, BUFFER>
impl<'t, 'd, T, CH> DmaTransfer for I2sWriteDmaTransfer<'t, 'd, T, CH>
where
T: RegisterAccess,
CH: ChannelTypes,
{
/// Wait for the DMA transfer to complete and return the buffers and the
/// I2sTx instance.
fn wait(self) -> Result<(BUFFER, I2sTx<'d, T, CH>), (DmaError, BUFFER, I2sTx<'d, T, CH>)> {
/// Wait for the DMA transfer to complete
fn wait(self) -> Result<(), DmaError> {
// Waiting for the DMA transfer is not enough. We need to wait for the
// peripheral to finish flushing its buffers, too.
self.i2s_tx.wait_tx_dma_done().ok();
let err = self.i2s_tx.tx_channel.has_error();
// `DmaTransfer` needs to have a `Drop` implementation, because we accept
// managed buffers that can free their memory on drop. Because of that
// we can't move out of the `DmaTransfer`'s fields, so we use `ptr::read`
// and `mem::forget`.
//
// NOTE(unsafe) There is no panic branch between getting the resources
// and forgetting `self`.
unsafe {
let buffer = core::ptr::read(&self.buffer);
let payload = core::ptr::read(&self.i2s_tx);
core::mem::forget(self);
if err {
Err((DmaError::DescriptorError, buffer, payload))
} else {
Ok((buffer, payload))
}
if self.i2s_tx.tx_channel.has_error() {
Err(DmaError::DescriptorError)
} else {
Ok(())
}
}
@ -299,7 +270,7 @@ where
}
}
impl<'d, T, CH, BUFFER> Drop for I2sWriteDmaTransfer<'d, T, CH, BUFFER>
impl<'t, 'd, T, CH> Drop for I2sWriteDmaTransfer<'t, 'd, T, CH>
where
T: RegisterAccess,
CH: ChannelTypes,
@ -323,31 +294,34 @@ where
/// Write I2S.
/// Returns [I2sWriteDmaTransfer] which represents the in-progress DMA
/// transfer
fn write_dma(self, words: TXBUF) -> Result<I2sWriteDmaTransfer<'d, T, CH, TXBUF>, Error>
fn write_dma<'t>(
&'t mut self,
words: &'t TXBUF,
) -> Result<I2sWriteDmaTransfer<'t, 'd, T, CH>, Error>
where
TXBUF: ReadBuffer<Word = u8>;
/// Continuously write to I2S. Returns [I2sWriteDmaTransfer] which
/// represents the in-progress DMA transfer
fn write_dma_circular(
self,
words: TXBUF,
) -> Result<I2sWriteDmaTransfer<'d, T, CH, TXBUF>, Error>
fn write_dma_circular<'t>(
&'t mut self,
words: &'t TXBUF,
) -> Result<I2sWriteDmaTransfer<'t, 'd, T, CH>, Error>
where
TXBUF: ReadBuffer<Word = u8>;
}
/// An in-progress DMA read transfer.
pub struct I2sReadDmaTransfer<'d, T, CH, BUFFER>
#[must_use]
pub struct I2sReadDmaTransfer<'t, 'd, T, CH>
where
T: RegisterAccess,
CH: ChannelTypes,
{
i2s_rx: I2sRx<'d, T, CH>,
buffer: BUFFER,
i2s_rx: &'t mut I2sRx<'d, T, CH>,
}
impl<'d, T, CH, BUFFER> I2sReadDmaTransfer<'d, T, CH, BUFFER>
impl<'t, 'd, T, CH> I2sReadDmaTransfer<'t, 'd, T, CH>
where
T: RegisterAccess,
CH: ChannelTypes,
@ -361,72 +335,38 @@ where
Ok(self.i2s_rx.rx_channel.pop(data)?)
}
/// Wait for the DMA transfer to complete and return the buffers and the
/// I2sTx instance after copying the read data to the given buffer.
/// Length of the received data is returned at the third element of the
/// tuple.
/// Wait for the DMA transfer to complete.
/// Length of the received data is returned
#[allow(clippy::type_complexity)]
pub fn wait_receive(
mut self,
dst: &mut [u8],
) -> Result<(BUFFER, I2sRx<'d, T, CH>, usize), (DmaError, BUFFER, I2sRx<'d, T, CH>, usize)>
{
pub fn wait_receive(self, dst: &mut [u8]) -> Result<usize, (DmaError, usize)> {
// Waiting for the DMA transfer is not enough. We need to wait for the
// peripheral to finish flushing its buffers, too.
self.i2s_rx.wait_rx_dma_done().ok();
let len = self.i2s_rx.rx_channel.drain_buffer(dst).unwrap();
let err = self.i2s_rx.rx_channel.has_error();
// `DmaTransfer` needs to have a `Drop` implementation, because we accept
// managed buffers that can free their memory on drop. Because of that
// we can't move out of the `DmaTransfer`'s fields, so we use `ptr::read`
// and `mem::forget`.
//
// NOTE(unsafe) There is no panic branch between getting the resources
// and forgetting `self`.
unsafe {
let buffer = core::ptr::read(&self.buffer);
let payload = core::ptr::read(&self.i2s_rx);
core::mem::forget(self);
if err {
Err((DmaError::DescriptorError, buffer, payload, len))
} else {
Ok((buffer, payload, len))
}
if self.i2s_rx.rx_channel.has_error() {
Err((DmaError::DescriptorError, len))
} else {
Ok(len)
}
}
}
impl<'d, T, CH, BUFFER> DmaTransfer<BUFFER, I2sRx<'d, T, CH>>
for I2sReadDmaTransfer<'d, T, CH, BUFFER>
impl<'t, 'd, T, CH> DmaTransfer for I2sReadDmaTransfer<'t, 'd, T, CH>
where
T: RegisterAccess,
CH: ChannelTypes,
{
/// Wait for the DMA transfer to complete and return the buffers and the
/// I2sTx instance.
fn wait(self) -> Result<(BUFFER, I2sRx<'d, T, CH>), (DmaError, BUFFER, I2sRx<'d, T, CH>)> {
/// Wait for the DMA transfer to complete
fn wait(self) -> Result<(), DmaError> {
// Waiting for the DMA transfer is not enough. We need to wait for the
// peripheral to finish flushing its buffers, too.
self.i2s_rx.wait_rx_dma_done().ok();
let err = self.i2s_rx.rx_channel.has_error();
// `DmaTransfer` needs to have a `Drop` implementation, because we accept
// managed buffers that can free their memory on drop. Because of that
// we can't move out of the `DmaTransfer`'s fields, so we use `ptr::read`
// and `mem::forget`.
//
// NOTE(unsafe) There is no panic branch between getting the resources
// and forgetting `self`.
unsafe {
let buffer = core::ptr::read(&self.buffer);
let payload = core::ptr::read(&self.i2s_rx);
core::mem::forget(self);
if err {
Err((DmaError::DescriptorError, buffer, payload))
} else {
Ok((buffer, payload))
}
if self.i2s_rx.rx_channel.has_error() {
Err(DmaError::DescriptorError)
} else {
Ok(())
}
}
@ -436,7 +376,7 @@ where
}
}
impl<T, CH, BUFFER> Drop for I2sReadDmaTransfer<'_, T, CH, BUFFER>
impl<'t, T, CH> Drop for I2sReadDmaTransfer<'t, '_, T, CH>
where
T: RegisterAccess,
CH: ChannelTypes,
@ -460,14 +400,20 @@ where
/// Read I2S.
/// Returns [I2sReadDmaTransfer] which represents the in-progress DMA
/// transfer
fn read_dma(self, words: RXBUF) -> Result<I2sReadDmaTransfer<'d, T, CH, RXBUF>, Error>
fn read_dma<'t>(
&'t mut self,
words: &'t mut RXBUF,
) -> Result<I2sReadDmaTransfer<'t, 'd, T, CH>, Error>
where
RXBUF: WriteBuffer<Word = u8>;
/// Continuously read from I2S.
/// Returns [I2sReadDmaTransfer] which represents the in-progress DMA
/// transfer
fn read_dma_circular(self, words: RXBUF) -> Result<I2sReadDmaTransfer<'d, T, CH, RXBUF>, Error>
fn read_dma_circular<'t>(
&'t mut self,
words: &'t mut RXBUF,
) -> Result<I2sReadDmaTransfer<'t, 'd, T, CH>, Error>
where
RXBUF: WriteBuffer<Word = u8>;
}
@ -628,11 +574,11 @@ where
Ok(())
}
fn start_tx_transfer<TXBUF>(
mut self,
words: TXBUF,
fn start_tx_transfer<'t, TXBUF>(
&'t mut self,
words: &'t TXBUF,
circular: bool,
) -> Result<I2sWriteDmaTransfer<'d, T, CH, TXBUF>, Error>
) -> Result<I2sWriteDmaTransfer<'t, 'd, T, CH>, Error>
where
TXBUF: ReadBuffer<Word = u8>,
{
@ -653,10 +599,7 @@ where
// start: set I2S_TX_START
T::tx_start();
Ok(I2sWriteDmaTransfer {
i2s_tx: self,
buffer: words,
})
Ok(I2sWriteDmaTransfer { i2s_tx: self })
}
fn wait_tx_dma_done(&self) -> Result<(), Error> {
@ -713,17 +656,20 @@ where
T: RegisterAccess,
CH: ChannelTypes,
{
fn write_dma(self, words: TXBUF) -> Result<I2sWriteDmaTransfer<'d, T, CH, TXBUF>, Error>
fn write_dma<'t>(
&'t mut self,
words: &'t TXBUF,
) -> Result<I2sWriteDmaTransfer<'t, 'd, T, CH>, Error>
where
TXBUF: ReadBuffer<Word = u8>,
{
self.start_tx_transfer(words, false)
}
fn write_dma_circular(
self,
words: TXBUF,
) -> Result<I2sWriteDmaTransfer<'d, T, CH, TXBUF>, Error>
fn write_dma_circular<'t>(
&'t mut self,
words: &'t TXBUF,
) -> Result<I2sWriteDmaTransfer<'t, 'd, T, CH>, Error>
where
TXBUF: ReadBuffer<Word = u8>,
{
@ -787,11 +733,11 @@ where
Ok(())
}
fn start_rx_transfer<RXBUF>(
mut self,
mut words: RXBUF,
fn start_rx_transfer<'t, RXBUF>(
&'t mut self,
words: &'t mut RXBUF,
circular: bool,
) -> Result<I2sReadDmaTransfer<'d, T, CH, RXBUF>, Error>
) -> Result<I2sReadDmaTransfer<'t, 'd, T, CH>, Error>
where
RXBUF: WriteBuffer<Word = u8>,
{
@ -820,10 +766,7 @@ where
#[cfg(esp32)]
T::rx_start(len);
Ok(I2sReadDmaTransfer {
i2s_rx: self,
buffer: words,
})
Ok(I2sReadDmaTransfer { i2s_rx: self })
}
fn wait_rx_dma_done(&self) -> Result<(), Error> {
@ -891,14 +834,20 @@ where
T: RegisterAccess,
CH: ChannelTypes,
{
fn read_dma(self, words: RXBUF) -> Result<I2sReadDmaTransfer<'d, T, CH, RXBUF>, Error>
fn read_dma<'t>(
&'t mut self,
words: &'t mut RXBUF,
) -> Result<I2sReadDmaTransfer<'t, 'd, T, CH>, Error>
where
RXBUF: WriteBuffer<Word = u8>,
{
self.start_rx_transfer(words, false)
}
fn read_dma_circular(self, words: RXBUF) -> Result<I2sReadDmaTransfer<'d, T, CH, RXBUF>, Error>
fn read_dma_circular<'t>(
&'t mut self,
words: &'t mut RXBUF,
) -> Result<I2sReadDmaTransfer<'t, 'd, T, CH>, Error>
where
RXBUF: WriteBuffer<Word = u8>,
{
@ -2130,7 +2079,7 @@ pub mod asynch {
&mut self,
f: impl FnOnce(&mut [u8]) -> usize,
) -> Result<usize, Error> {
let avail = self.available().await;
let _avail = self.available().await;
Ok(self.i2s_tx.tx_channel.push_with(f)?)
}
}

View File

@ -296,12 +296,12 @@ where
Ok(())
}
pub fn send_dma<TXBUF>(
mut self,
pub fn send_dma<'t, TXBUF>(
&'t mut self,
cmd: impl Into<Command<P::Word>>,
dummy: u8,
data: TXBUF,
) -> Result<Transfer<'d, TX, TXBUF, P>, DmaError>
data: &'t TXBUF,
) -> Result<Transfer<'t, 'd, TX, P>, DmaError>
where
TXBUF: ReadBuffer<Word = P::Word>,
{
@ -313,7 +313,6 @@ where
Ok(Transfer {
instance: Some(self),
buffer: Some(data),
})
}
}
@ -432,17 +431,15 @@ impl<'d, TX, P> core::fmt::Debug for I8080<'d, TX, P> {
}
/// An in-progress transfer
pub struct Transfer<'d, TX: Tx, BUFFER, P> {
instance: Option<I8080<'d, TX, P>>,
buffer: Option<BUFFER>,
#[must_use]
pub struct Transfer<'t, 'd, TX: Tx, P> {
instance: Option<&'t mut I8080<'d, TX, P>>,
}
impl<'d, TX: Tx, BUFFER, P> Transfer<'d, TX, BUFFER, P> {
impl<'t, 'd, TX: Tx, P> Transfer<'t, 'd, TX, P> {
#[allow(clippy::type_complexity)]
pub fn wait(
mut self,
) -> Result<(BUFFER, I8080<'d, TX, P>), (DmaError, BUFFER, I8080<'d, TX, P>)> {
let mut instance = self
pub fn wait(mut self) -> Result<(), DmaError> {
let instance = self
.instance
.take()
.expect("instance must be available throughout object lifetime");
@ -454,15 +451,10 @@ impl<'d, TX: Tx, BUFFER, P> Transfer<'d, TX, BUFFER, P> {
instance.tear_down_send();
}
let buffer = self
.buffer
.take()
.expect("buffer must be available throughout object lifetime");
if instance.tx_channel.has_error() {
Err((DmaError::DescriptorError, buffer, instance))
Err(DmaError::DescriptorError)
} else {
Ok((buffer, instance))
Ok(())
}
}
@ -477,7 +469,7 @@ impl<'d, TX: Tx, BUFFER, P> Transfer<'d, TX, BUFFER, P> {
}
}
impl<'d, TX: Tx, BUFFER, P> Drop for Transfer<'d, TX, BUFFER, P> {
impl<'t, 'd, TX: Tx, P> Drop for Transfer<'t, 'd, TX, P> {
fn drop(&mut self) {
if let Some(instance) = self.instance.as_mut() {
// This will cancel the transfer.

View File

@ -43,11 +43,9 @@
//!
//! ### Start TX transfer
//! ```no_run
//! let transfer = parl_io_tx.write_dma(buffer).unwrap();
//! let mut transfer = parl_io_tx.write_dma(buffer).unwrap();
//!
//! // the buffer and driver is moved into the transfer and we can get it back via
//! // `wait`
//! (buffer, parl_io_tx) = transfer.wait().unwrap();
//! transfer.wait().unwrap();
//! ```
//!
//! ### Initialization for RX
@ -75,15 +73,10 @@
//!
//! ### Start RX transfer
//! ```no_run
//! let transfer = parl_io_rx.read_dma(buffer).unwrap();
//!
//! // the buffer and driver is moved into the transfer and we can get it back via
//! // `wait`
//! (buffer, parl_io_rx) = transfer.wait().unwrap();
//! let mut transfer = parl_io_rx.read_dma(buffer).unwrap();
//! transfer.wait().unwrap();
//! ```
use core::mem;
use embedded_dma::{ReadBuffer, WriteBuffer};
use fugit::HertzU32;
use peripheral::PeripheralRef;
@ -1172,10 +1165,10 @@ where
/// instance.
///
/// The maximum amount of data to be sent is 32736 bytes.
pub fn write_dma<TXBUF>(
mut self,
words: TXBUF,
) -> Result<DmaTransfer<'d, CH, TXBUF, P, CP>, Error>
pub fn write_dma<'t, TXBUF>(
&'t mut self,
words: &'t TXBUF,
) -> Result<DmaTransfer<'t, 'd, CH, P, CP>, Error>
where
TXBUF: ReadBuffer<Word = u8>,
{
@ -1187,10 +1180,7 @@ where
self.start_write_bytes_dma(ptr, len)?;
Ok(DmaTransfer {
instance: self,
buffer: words,
})
Ok(DmaTransfer { instance: self })
}
fn start_write_bytes_dma(&mut self, ptr: *const u8, len: usize) -> Result<(), Error> {
@ -1221,54 +1211,37 @@ where
}
/// An in-progress DMA transfer.
pub struct DmaTransfer<'d, C, BUFFER, P, CP>
#[must_use]
pub struct DmaTransfer<'t, 'd, C, P, CP>
where
C: ChannelTypes,
C::P: ParlIoPeripheral,
P: TxPins + ConfigurePins,
CP: TxClkPin,
{
instance: ParlIoTx<'d, C, P, CP>,
buffer: BUFFER,
instance: &'t mut ParlIoTx<'d, C, P, CP>,
}
impl<'d, C, BUFFER, P, CP> DmaTransfer<'d, C, BUFFER, P, CP>
impl<'t, 'd, C, P, CP> DmaTransfer<'t, 'd, C, P, CP>
where
C: ChannelTypes,
C::P: ParlIoPeripheral,
P: TxPins + ConfigurePins,
CP: TxClkPin,
{
/// Wait for the DMA transfer to complete and return the buffers and the
/// SPI instance.
/// Wait for the DMA transfer to complete
#[allow(clippy::type_complexity)]
pub fn wait(
self,
) -> Result<(BUFFER, ParlIoTx<'d, C, P, CP>), (DmaError, BUFFER, ParlIoTx<'d, C, P, CP>)> {
pub fn wait(self) -> Result<(), DmaError> {
// Waiting for the DMA transfer is not enough. We need to wait for the
// peripheral to finish flushing its buffers, too.
while !Instance::is_tx_eof() {}
Instance::set_tx_start(false);
let err = self.instance.tx_channel.has_error();
// `DmaTransfer` needs to have a `Drop` implementation, because we accept
// managed buffers that can free their memory on drop. Because of that
// we can't move out of the `DmaTransfer`'s fields, so we use `ptr::read`
// and `mem::forget`.
//
// NOTE(unsafe) There is no panic branch between getting the resources
// and forgetting `self`.
unsafe {
let buffer = core::ptr::read(&self.buffer);
let payload = core::ptr::read(&self.instance);
mem::forget(self);
if err {
Err((DmaError::DescriptorError, buffer, payload))
} else {
Ok((buffer, payload))
}
if self.instance.tx_channel.has_error() {
Err(DmaError::DescriptorError)
} else {
Ok(())
}
}
@ -1295,10 +1268,10 @@ where
///
/// It's only limited by the size of the DMA buffer when using
/// [EofMode::EnableSignal].
pub fn read_dma<RXBUF>(
mut self,
mut words: RXBUF,
) -> Result<RxDmaTransfer<'d, CH, RXBUF, P, CP>, Error>
pub fn read_dma<'t, RXBUF>(
&'t mut self,
words: &'t mut RXBUF,
) -> Result<RxDmaTransfer<'t, 'd, CH, P, CP>, Error>
where
RXBUF: WriteBuffer<Word = u8>,
{
@ -1310,10 +1283,7 @@ where
self.start_receive_bytes_dma(ptr, len)?;
Ok(RxDmaTransfer {
instance: self,
buffer: words,
})
Ok(RxDmaTransfer { instance: self })
}
fn start_receive_bytes_dma(&mut self, ptr: *mut u8, len: usize) -> Result<(), Error> {
@ -1338,30 +1308,26 @@ where
}
/// An in-progress DMA transfer.
pub struct RxDmaTransfer<'d, C, BUFFER, P, CP>
pub struct RxDmaTransfer<'t, 'd, C, P, CP>
where
C: ChannelTypes,
C::P: ParlIoPeripheral,
P: RxPins + ConfigurePins,
CP: RxClkPin,
{
instance: ParlIoRx<'d, C, P, CP>,
buffer: BUFFER,
instance: &'t mut ParlIoRx<'d, C, P, CP>,
}
impl<'d, C, BUFFER, P, CP> RxDmaTransfer<'d, C, BUFFER, P, CP>
impl<'t, 'd, C, P, CP> RxDmaTransfer<'t, 'd, C, P, CP>
where
C: ChannelTypes,
C::P: ParlIoPeripheral,
P: RxPins + ConfigurePins,
CP: RxClkPin,
{
/// Wait for the DMA transfer to complete and return the buffers and the
/// SPI instance.
/// Wait for the DMA transfer to complete
#[allow(clippy::type_complexity)]
pub fn wait(
self,
) -> Result<(BUFFER, ParlIoRx<'d, C, P, CP>), (DmaError, BUFFER, ParlIoRx<'d, C, P, CP>)> {
pub fn wait(self) -> Result<(), DmaError> {
loop {
if self.is_done() || self.is_eof_error() {
break;
@ -1370,23 +1336,10 @@ where
Instance::set_rx_start(false);
// `DmaTransfer` needs to have a `Drop` implementation, because we accept
// managed buffers that can free their memory on drop. Because of that
// we can't move out of the `DmaTransfer`'s fields, so we use `ptr::read`
// and `mem::forget`.
//
// NOTE(unsafe) There is no panic branch between getting the resources
// and forgetting `self`.
unsafe {
let buffer = core::ptr::read(&self.buffer);
let payload = core::ptr::read(&self.instance);
let err = self.instance.rx_channel.has_error();
mem::forget(self);
if err {
Err((DmaError::DescriptorError, buffer, payload))
} else {
Ok((buffer, payload))
}
if self.instance.rx_channel.has_error() {
Err(DmaError::DescriptorError)
} else {
Ok(())
}
}

View File

@ -759,8 +759,6 @@ where
}
pub mod dma {
use core::mem;
use embedded_dma::{ReadBuffer, WriteBuffer};
use super::*;
@ -841,56 +839,34 @@ pub mod dma {
}
}
/// An in-progress DMA transfer
pub struct SpiDmaTransferRxTx<'d, T, C, RBUFFER, TBUFFER, M>
#[must_use]
pub struct SpiDmaTransferRxTx<'t, 'd, T, C, M>
where
T: InstanceDma<C::Tx<'d>, C::Rx<'d>>,
C: ChannelTypes,
C::P: SpiPeripheral,
M: DuplexMode,
{
spi_dma: SpiDma<'d, T, C, M>,
rbuffer: RBUFFER,
tbuffer: TBUFFER,
spi_dma: &'t mut SpiDma<'d, T, C, M>,
}
impl<'d, T, C, RXBUF, TXBUF, M> DmaTransferRxTx<RXBUF, TXBUF, SpiDma<'d, T, C, M>>
for SpiDmaTransferRxTx<'d, T, C, RXBUF, TXBUF, M>
impl<'t, 'd, T, C, M> DmaTransferRxTx for SpiDmaTransferRxTx<'t, 'd, T, C, M>
where
T: InstanceDma<C::Tx<'d>, C::Rx<'d>>,
C: ChannelTypes,
C::P: SpiPeripheral,
M: DuplexMode,
{
/// Wait for the DMA transfer to complete and return the buffers and the
/// SPI instance.
fn wait(
mut self,
) -> Result<
(RXBUF, TXBUF, SpiDma<'d, T, C, M>),
(DmaError, RXBUF, TXBUF, SpiDma<'d, T, C, M>),
> {
/// Wait for the DMA transfer to complete
fn wait(self) -> Result<(), DmaError> {
// Waiting for the DMA transfer is not enough. We need to wait for the
// peripheral to finish flushing its buffers, too.
self.spi_dma.spi.flush().ok();
let err = self.spi_dma.channel.rx.has_error() || self.spi_dma.channel.tx.has_error();
// `DmaTransfer` needs to have a `Drop` implementation, because we accept
// managed buffers that can free their memory on drop. Because of that
// we can't move out of the `DmaTransfer`'s fields, so we use `ptr::read`
// and `mem::forget`.
//
// NOTE(unsafe) There is no panic branch between getting the resources
// and forgetting `self`.
unsafe {
let rbuffer = core::ptr::read(&self.rbuffer);
let tbuffer = core::ptr::read(&self.tbuffer);
let payload = core::ptr::read(&self.spi_dma);
mem::forget(self);
if err {
Err((DmaError::DescriptorError, rbuffer, tbuffer, payload))
} else {
Ok((rbuffer, tbuffer, payload))
}
if self.spi_dma.channel.rx.has_error() || self.spi_dma.channel.tx.has_error() {
Err(DmaError::DescriptorError)
} else {
Ok(())
}
}
@ -901,7 +877,7 @@ pub mod dma {
}
}
impl<'d, T, C, RXBUF, TXBUF, M> Drop for SpiDmaTransferRxTx<'d, T, C, RXBUF, TXBUF, M>
impl<'t, 'd, T, C, M> Drop for SpiDmaTransferRxTx<'t, 'd, T, C, M>
where
T: InstanceDma<C::Tx<'d>, C::Rx<'d>>,
C: ChannelTypes,
@ -914,52 +890,34 @@ pub mod dma {
}
/// An in-progress DMA transfer.
pub struct SpiDmaTransfer<'d, T, C, BUFFER, M>
#[must_use]
pub struct SpiDmaTransfer<'t, 'd, T, C, M>
where
T: InstanceDma<C::Tx<'d>, C::Rx<'d>>,
C: ChannelTypes,
C::P: SpiPeripheral,
M: DuplexMode,
{
spi_dma: SpiDma<'d, T, C, M>,
buffer: BUFFER,
spi_dma: &'t mut SpiDma<'d, T, C, M>,
}
impl<'d, T, C, BUFFER, M> DmaTransfer<BUFFER, SpiDma<'d, T, C, M>>
for SpiDmaTransfer<'d, T, C, BUFFER, M>
impl<'t, 'd, T, C, M> DmaTransfer for SpiDmaTransfer<'t, 'd, T, C, M>
where
T: InstanceDma<C::Tx<'d>, C::Rx<'d>>,
C: ChannelTypes,
C::P: SpiPeripheral,
M: DuplexMode,
{
/// Wait for the DMA transfer to complete and return the buffers and the
/// SPI instance.
fn wait(
mut self,
) -> Result<(BUFFER, SpiDma<'d, T, C, M>), (DmaError, BUFFER, SpiDma<'d, T, C, M>)>
{
/// Wait for the DMA transfer to complete
fn wait(self) -> Result<(), DmaError> {
// Waiting for the DMA transfer is not enough. We need to wait for the
// peripheral to finish flushing its buffers, too.
self.spi_dma.spi.flush().ok();
let err = self.spi_dma.channel.rx.has_error() || self.spi_dma.channel.tx.has_error();
// `DmaTransfer` needs to have a `Drop` implementation, because we accept
// managed buffers that can free their memory on drop. Because of that
// we can't move out of the `DmaTransfer`'s fields, so we use `ptr::read`
// and `mem::forget`.
//
// NOTE(unsafe) There is no panic branch between getting the resources
// and forgetting `self`.
unsafe {
let buffer = core::ptr::read(&self.buffer);
let payload = core::ptr::read(&self.spi_dma);
mem::forget(self);
if err {
Err((DmaError::DescriptorError, buffer, payload))
} else {
Ok((buffer, payload))
}
if self.spi_dma.channel.rx.has_error() || self.spi_dma.channel.tx.has_error() {
Err(DmaError::DescriptorError)
} else {
Ok(())
}
}
@ -970,7 +928,7 @@ pub mod dma {
}
}
impl<'d, T, C, BUFFER, M> Drop for SpiDmaTransfer<'d, T, C, BUFFER, M>
impl<'t, 'd, T, C, M> Drop for SpiDmaTransfer<'t, 'd, T, C, M>
where
T: InstanceDma<C::Tx<'d>, C::Rx<'d>>,
C: ChannelTypes,
@ -1030,10 +988,10 @@ pub mod dma {
/// instance. The maximum amount of data to be sent is 32736
/// bytes.
#[cfg_attr(feature = "place-spi-driver-in-ram", ram)]
pub fn dma_write<TXBUF>(
mut self,
words: TXBUF,
) -> Result<SpiDmaTransfer<'d, T, C, TXBUF, M>, super::Error>
pub fn dma_write<'t, TXBUF>(
&'t mut self,
words: &'t TXBUF,
) -> Result<SpiDmaTransfer<'t, 'd, T, C, M>, super::Error>
where
TXBUF: ReadBuffer<Word = u8>,
{
@ -1045,10 +1003,7 @@ pub mod dma {
self.spi
.start_write_bytes_dma(ptr, len, &mut self.channel.tx, false)?;
Ok(SpiDmaTransfer {
spi_dma: self,
buffer: words,
})
Ok(SpiDmaTransfer { spi_dma: self })
}
/// Perform a DMA read.
@ -1057,10 +1012,10 @@ pub mod dma {
/// instance. The maximum amount of data to be received is 32736
/// bytes.
#[cfg_attr(feature = "place-spi-driver-in-ram", ram)]
pub fn dma_read<RXBUF>(
mut self,
mut words: RXBUF,
) -> Result<SpiDmaTransfer<'d, T, C, RXBUF, M>, super::Error>
pub fn dma_read<'t, RXBUF>(
&'t mut self,
words: &'t mut RXBUF,
) -> Result<SpiDmaTransfer<'t, 'd, T, C, M>, super::Error>
where
RXBUF: WriteBuffer<Word = u8>,
{
@ -1072,10 +1027,7 @@ pub mod dma {
self.spi
.start_read_bytes_dma(ptr, len, &mut self.channel.rx, false)?;
Ok(SpiDmaTransfer {
spi_dma: self,
buffer: words,
})
Ok(SpiDmaTransfer { spi_dma: self })
}
/// Perform a DMA transfer.
@ -1083,11 +1035,11 @@ pub mod dma {
/// This will return a [SpiDmaTransfer] owning the buffer(s) and the SPI
/// instance. The maximum amount of data to be sent/received is
/// 32736 bytes.
pub fn dma_transfer<TXBUF, RXBUF>(
mut self,
words: TXBUF,
mut read_buffer: RXBUF,
) -> Result<SpiDmaTransferRxTx<'d, T, C, RXBUF, TXBUF, M>, super::Error>
pub fn dma_transfer<'t, TXBUF, RXBUF>(
&'t mut self,
words: &'t TXBUF,
read_buffer: &'t mut RXBUF,
) -> Result<SpiDmaTransferRxTx<'t, 'd, T, C, M>, super::Error>
where
TXBUF: ReadBuffer<Word = u8>,
RXBUF: WriteBuffer<Word = u8>,
@ -1108,11 +1060,7 @@ pub mod dma {
&mut self.channel.rx,
false,
)?;
Ok(SpiDmaTransferRxTx {
spi_dma: self,
rbuffer: read_buffer,
tbuffer: words,
})
Ok(SpiDmaTransferRxTx { spi_dma: self })
}
}
@ -1124,14 +1072,14 @@ pub mod dma {
M: IsHalfDuplex,
{
#[cfg_attr(feature = "place-spi-driver-in-ram", ram)]
pub fn read<RXBUF>(
mut self,
pub fn read<'t, RXBUF>(
&'t mut self,
data_mode: SpiDataMode,
cmd: Command,
address: Address,
dummy: u8,
mut buffer: RXBUF,
) -> Result<SpiDmaTransfer<'d, T, C, RXBUF, M>, super::Error>
buffer: &'t mut RXBUF,
) -> Result<SpiDmaTransfer<'t, 'd, T, C, M>, super::Error>
where
RXBUF: WriteBuffer<Word = u8>,
{
@ -1191,21 +1139,18 @@ pub mod dma {
self.spi
.start_read_bytes_dma(ptr, len, &mut self.channel.rx, false)?;
Ok(SpiDmaTransfer {
spi_dma: self,
buffer,
})
Ok(SpiDmaTransfer { spi_dma: self })
}
#[cfg_attr(feature = "place-spi-driver-in-ram", ram)]
pub fn write<TXBUF>(
mut self,
pub fn write<'t, TXBUF>(
&'t mut self,
data_mode: SpiDataMode,
cmd: Command,
address: Address,
dummy: u8,
buffer: TXBUF,
) -> Result<SpiDmaTransfer<'d, T, C, TXBUF, M>, super::Error>
buffer: &'t TXBUF,
) -> Result<SpiDmaTransfer<'t, 'd, T, C, M>, super::Error>
where
TXBUF: ReadBuffer<Word = u8>,
{
@ -1265,10 +1210,7 @@ pub mod dma {
self.spi
.start_write_bytes_dma(ptr, len, &mut self.channel.tx, false)?;
Ok(SpiDmaTransfer {
spi_dma: self,
buffer,
})
Ok(SpiDmaTransfer { spi_dma: self })
}
}
@ -1516,8 +1458,7 @@ pub mod dma {
mod ehal1 {
use embedded_hal_1::spi::{ErrorType, SpiBus};
use super::{super::InstanceDma, *};
use crate::{dma::ChannelTypes, FlashSafeDma};
use super::*;
impl<'d, T, C, M> ErrorType for SpiDma<'d, T, C, M>
where

View File

@ -137,8 +137,6 @@ where
}
pub mod dma {
use core::mem;
use embedded_dma::{ReadBuffer, WriteBuffer};
use super::*;
@ -220,54 +218,33 @@ pub mod dma {
}
}
/// An in-progress DMA transfer
pub struct SpiDmaTransferRxTx<'d, T, C, RBUFFER, TBUFFER>
#[must_use]
pub struct SpiDmaTransferRxTx<'t, 'd, T, C>
where
T: InstanceDma<C::Tx<'d>, C::Rx<'d>>,
C: ChannelTypes,
C::P: SpiPeripheral,
{
spi_dma: SpiDma<'d, T, C>,
rbuffer: RBUFFER,
tbuffer: TBUFFER,
spi_dma: &'t mut SpiDma<'d, T, C>,
}
impl<'d, T, C, RXBUF, TXBUF> DmaTransferRxTx<RXBUF, TXBUF, SpiDma<'d, T, C>>
for SpiDmaTransferRxTx<'d, T, C, RXBUF, TXBUF>
impl<'t, 'd, T, C> DmaTransferRxTx for SpiDmaTransferRxTx<'t, 'd, T, C>
where
T: InstanceDma<C::Tx<'d>, C::Rx<'d>>,
C: ChannelTypes,
C::P: SpiPeripheral,
{
/// Wait for the DMA transfer to complete and return the buffers and the
/// SPI instance.
fn wait(
mut self,
) -> Result<(RXBUF, TXBUF, SpiDma<'d, T, C>), (DmaError, RXBUF, TXBUF, SpiDma<'d, T, C>)>
{
/// Wait for the DMA transfer to complete
fn wait(self) -> Result<(), DmaError> {
// Waiting for the DMA transfer is not enough. We need to wait for the
// peripheral to finish flushing its buffers, too.
while !self.is_done() {}
self.spi_dma.spi.flush().ok();
let err = self.spi_dma.channel.rx.has_error() || self.spi_dma.channel.tx.has_error();
// `DmaTransfer` needs to have a `Drop` implementation, because we accept
// managed buffers that can free their memory on drop. Because of that
// we can't move out of the `DmaTransfer`'s fields, so we use `ptr::read`
// and `mem::forget`.
//
// NOTE(unsafe) There is no panic branch between getting the resources
// and forgetting `self`.
unsafe {
let rbuffer = core::ptr::read(&self.rbuffer);
let tbuffer = core::ptr::read(&self.tbuffer);
let payload = core::ptr::read(&self.spi_dma);
mem::forget(self);
if err {
Err((DmaError::DescriptorError, rbuffer, tbuffer, payload))
} else {
Ok((rbuffer, tbuffer, payload))
}
if self.spi_dma.channel.rx.has_error() || self.spi_dma.channel.tx.has_error() {
Err(DmaError::DescriptorError)
} else {
Ok(())
}
}
@ -278,7 +255,7 @@ pub mod dma {
}
}
impl<'d, T, C, RXBUF, TXBUF> Drop for SpiDmaTransferRxTx<'d, T, C, RXBUF, TXBUF>
impl<'t, 'd, T, C> Drop for SpiDmaTransferRxTx<'t, 'd, T, C>
where
T: InstanceDma<C::Tx<'d>, C::Rx<'d>>,
C: ChannelTypes,
@ -290,48 +267,32 @@ pub mod dma {
}
}
pub struct SpiDmaTransferRx<'d, T, C, BUFFER>
/// An in-progress DMA transfer.
#[must_use]
pub struct SpiDmaTransferRx<'t, 'd, T, C>
where
T: InstanceDma<C::Tx<'d>, C::Rx<'d>>,
C: ChannelTypes,
C::P: SpiPeripheral,
{
spi_dma: SpiDma<'d, T, C>,
buffer: BUFFER,
spi_dma: &'t mut SpiDma<'d, T, C>,
}
impl<'d, T, C, BUFFER> DmaTransfer<BUFFER, SpiDma<'d, T, C>> for SpiDmaTransferRx<'d, T, C, BUFFER>
impl<'t, 'd, T, C> DmaTransfer for SpiDmaTransferRx<'t, 'd, T, C>
where
T: InstanceDma<C::Tx<'d>, C::Rx<'d>>,
C: ChannelTypes,
C::P: SpiPeripheral,
{
/// Wait for the DMA transfer to complete and return the buffers and the
/// SPI instance.
fn wait(
mut self,
) -> Result<(BUFFER, SpiDma<'d, T, C>), (DmaError, BUFFER, SpiDma<'d, T, C>)> {
/// Wait for the DMA transfer to complete
fn wait(self) -> Result<(), DmaError> {
while !self.is_done() {}
self.spi_dma.spi.flush().ok(); // waiting for the DMA transfer is not enough
// `DmaTransfer` needs to have a `Drop` implementation, because we accept
// managed buffers that can free their memory on drop. Because of that
// we can't move out of the `DmaTransfer`'s fields, so we use `ptr::read`
// and `mem::forget`.
//
// NOTE(unsafe) There is no panic branch between getting the resources
// and forgetting `self`.
unsafe {
let buffer = core::ptr::read(&self.buffer);
let payload = core::ptr::read(&self.spi_dma);
let err =
self.spi_dma.channel.rx.has_error() || self.spi_dma.channel.tx.has_error();
mem::forget(self);
if err {
Err((DmaError::DescriptorError, buffer, payload))
} else {
Ok((buffer, payload))
}
if self.spi_dma.channel.rx.has_error() || self.spi_dma.channel.tx.has_error() {
Err(DmaError::DescriptorError)
} else {
Ok(())
}
}
@ -342,7 +303,7 @@ pub mod dma {
}
}
impl<'d, T, C, BUFFER> Drop for SpiDmaTransferRx<'d, T, C, BUFFER>
impl<'t, 'd, T, C> Drop for SpiDmaTransferRx<'t, 'd, T, C>
where
T: InstanceDma<C::Tx<'d>, C::Rx<'d>>,
C: ChannelTypes,
@ -356,50 +317,33 @@ pub mod dma {
}
/// An in-progress DMA transfer.
pub struct SpiDmaTransferTx<'d, T, C, BUFFER>
#[must_use]
pub struct SpiDmaTransferTx<'t, 'd, T, C>
where
T: InstanceDma<C::Tx<'d>, C::Rx<'d>>,
C: ChannelTypes,
C::P: SpiPeripheral,
{
spi_dma: SpiDma<'d, T, C>,
buffer: BUFFER,
spi_dma: &'t mut SpiDma<'d, T, C>,
}
impl<'d, T, C, BUFFER> DmaTransfer<BUFFER, SpiDma<'d, T, C>> for SpiDmaTransferTx<'d, T, C, BUFFER>
impl<'t, 'd, T, C> DmaTransfer for SpiDmaTransferTx<'t, 'd, T, C>
where
T: InstanceDma<C::Tx<'d>, C::Rx<'d>>,
C: ChannelTypes,
C::P: SpiPeripheral,
{
/// Wait for the DMA transfer to complete and return the buffers and the
/// SPI instance.
fn wait(
mut self,
) -> Result<(BUFFER, SpiDma<'d, T, C>), (DmaError, BUFFER, SpiDma<'d, T, C>)> {
/// Wait for the DMA transfer to complete
fn wait(self) -> Result<(), DmaError> {
// Waiting for the DMA transfer is not enough. We need to wait for the
// peripheral to finish flushing its buffers, too.
while !self.is_done() {}
self.spi_dma.spi.flush().ok();
let err = self.spi_dma.channel.rx.has_error() || self.spi_dma.channel.tx.has_error();
// `DmaTransfer` needs to have a `Drop` implementation, because we accept
// managed buffers that can free their memory on drop. Because of that
// we can't move out of the `DmaTransfer`'s fields, so we use `ptr::read`
// and `mem::forget`.
//
// NOTE(unsafe) There is no panic branch between getting the resources
// and forgetting `self`.
unsafe {
let buffer = core::ptr::read(&self.buffer);
let payload = core::ptr::read(&self.spi_dma);
mem::forget(self);
if err {
Err((DmaError::DescriptorError, buffer, payload))
} else {
Ok((buffer, payload))
}
if self.spi_dma.channel.rx.has_error() || self.spi_dma.channel.tx.has_error() {
Err(DmaError::DescriptorError)
} else {
Ok(())
}
}
@ -410,7 +354,7 @@ pub mod dma {
}
}
impl<'d, T, C, BUFFER> Drop for SpiDmaTransferTx<'d, T, C, BUFFER>
impl<'t, 'd, T, C> Drop for SpiDmaTransferTx<'t, 'd, T, C>
where
T: InstanceDma<C::Tx<'d>, C::Rx<'d>>,
C: ChannelTypes,
@ -456,10 +400,10 @@ pub mod dma {
/// bytes.
///
/// The write is driven by the SPI master's sclk signal and cs line.
pub fn dma_write<TXBUF>(
mut self,
words: TXBUF,
) -> Result<SpiDmaTransferTx<'d, T, C, TXBUF>, Error>
pub fn dma_write<'t, TXBUF>(
&'t mut self,
words: &'t TXBUF,
) -> Result<SpiDmaTransferTx<'t, 'd, T, C>, Error>
where
TXBUF: ReadBuffer<Word = u8>,
{
@ -471,10 +415,7 @@ pub mod dma {
self.spi
.start_write_bytes_dma(ptr, len, &mut self.channel.tx)
.map(move |_| SpiDmaTransferTx {
spi_dma: self,
buffer: words,
})
.map(move |_| SpiDmaTransferTx { spi_dma: self })
}
/// Register a buffer for a DMA read.
@ -484,10 +425,10 @@ pub mod dma {
/// 32736 bytes.
///
/// The read is driven by the SPI master's sclk signal and cs line.
pub fn dma_read<RXBUF>(
mut self,
mut words: RXBUF,
) -> Result<SpiDmaTransferRx<'d, T, C, RXBUF>, Error>
pub fn dma_read<'t, RXBUF>(
&'t mut self,
words: &'t mut RXBUF,
) -> Result<SpiDmaTransferRx<'t, 'd, T, C>, Error>
where
RXBUF: WriteBuffer<Word = u8>,
{
@ -499,10 +440,7 @@ pub mod dma {
self.spi
.start_read_bytes_dma(ptr, len, &mut self.channel.rx)
.map(move |_| SpiDmaTransferRx {
spi_dma: self,
buffer: words,
})
.map(move |_| SpiDmaTransferRx { spi_dma: self })
}
/// Register buffers for a DMA transfer.
@ -513,11 +451,11 @@ pub mod dma {
///
/// The data transfer is driven by the SPI master's sclk signal and cs
/// line.
pub fn dma_transfer<TXBUF, RXBUF>(
mut self,
words: TXBUF,
mut read_buffer: RXBUF,
) -> Result<SpiDmaTransferRxTx<'d, T, C, RXBUF, TXBUF>, Error>
pub fn dma_transfer<'t, TXBUF, RXBUF>(
&'t mut self,
words: &'t TXBUF,
read_buffer: &'t mut RXBUF,
) -> Result<SpiDmaTransferRxTx<'t, 'd, T, C>, Error>
where
TXBUF: ReadBuffer<Word = u8>,
RXBUF: WriteBuffer<Word = u8>,
@ -538,11 +476,7 @@ pub mod dma {
&mut self.channel.tx,
&mut self.channel.rx,
)
.map(move |_| SpiDmaTransferRxTx {
spi_dma: self,
rbuffer: read_buffer,
tbuffer: words,
})
.map(move |_| SpiDmaTransferRxTx { spi_dma: self })
}
}
}

View File

@ -16,7 +16,8 @@ use esp_hal::{
Aes,
Mode,
},
dma::{Dma, DmaDescriptor, DmaPriority},
dma::{Dma, DmaPriority},
dma_buffers,
peripherals::Peripherals,
prelude::*,
};
@ -30,73 +31,65 @@ fn main() -> ! {
let dma = Dma::new(peripherals.DMA);
let dma_channel = dma.channel0;
let mut descriptors = [DmaDescriptor::EMPTY; 1];
let mut rx_descriptors = [DmaDescriptor::EMPTY; 1];
let (input, mut tx_descriptors, mut output, mut rx_descriptors) = dma_buffers!(16, 16);
let aes = Aes::new(peripherals.AES).with_dma(dma_channel.configure(
let mut aes = Aes::new(peripherals.AES).with_dma(dma_channel.configure(
false,
&mut descriptors,
&mut tx_descriptors,
&mut rx_descriptors,
DmaPriority::Priority0,
));
let keytext = buffer1();
let plaintext = buffer2();
plaintext.copy_from_slice(&[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]);
keytext.copy_from_slice(&[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]);
let keytext = [1u8, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16];
input.copy_from_slice(&[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]);
// create an array with aes128 key size
let mut keybuf = [0_u8; 16];
keybuf[..keytext.len()].copy_from_slice(keytext);
// create an array with aes block size
let mut block_buf = [0_u8; 16];
block_buf[..plaintext.len()].copy_from_slice(plaintext);
let hw_encrypted = buffer3();
let pre_hw_encrypt = cycles();
let transfer = aes
.process(
plaintext,
hw_encrypted,
&input,
&mut output,
Mode::Encryption128,
CipherMode::Ecb,
keybuf,
keytext,
)
.unwrap();
let (hw_encrypted, plaintext, aes) = transfer.wait().unwrap();
transfer.wait().unwrap();
let post_hw_encrypt = cycles();
println!(
"it took {} cycles for hw encrypt",
post_hw_encrypt - pre_hw_encrypt
);
let keytext = buffer4();
plaintext.copy_from_slice(hw_encrypted);
keytext.copy_from_slice(&[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]);
let mut hw_encrypted = [0u8; 16];
(&mut hw_encrypted[..]).copy_from_slice(output);
let mut keybuf = [0_u8; 16];
keybuf[..keytext.len()].copy_from_slice(keytext);
input.copy_from_slice(output);
let hw_decrypted = buffer5();
let pre_hw_decrypt = cycles();
let transfer = aes
.process(
plaintext,
hw_decrypted,
&input,
&mut output,
Mode::Decryption128,
CipherMode::Ecb,
keybuf,
keytext,
)
.unwrap();
let (hw_decrypted, _, _) = transfer.wait().unwrap();
transfer.wait().unwrap();
let post_hw_decrypt = cycles();
println!(
"it took {} cycles for hw decrypt",
post_hw_decrypt - pre_hw_decrypt
);
let key = GenericArray::from(keybuf);
let mut hw_decrypted = [0u8; 16];
(&mut hw_decrypted[..]).copy_from_slice(output);
// create an array with aes block size
let mut block_buf = [0_u8; 16];
block_buf[..].copy_from_slice(&[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]);
let key = GenericArray::from(keytext);
let mut block = GenericArray::from(block_buf);
let cipher = Aes128SW::new(&key);
let pre_sw_encrypt = cycles();
@ -123,31 +116,6 @@ fn main() -> ! {
loop {}
}
fn buffer1() -> &'static mut [u8; 16] {
static mut BUFFER: [u8; 16] = [0u8; 16];
unsafe { &mut BUFFER }
}
fn buffer2() -> &'static mut [u8; 16] {
static mut BUFFER: [u8; 16] = [0u8; 16];
unsafe { &mut BUFFER }
}
fn buffer3() -> &'static mut [u8; 16] {
static mut BUFFER: [u8; 16] = [0u8; 16];
unsafe { &mut BUFFER }
}
fn buffer4() -> &'static mut [u8; 16] {
static mut BUFFER: [u8; 16] = [0u8; 16];
unsafe { &mut BUFFER }
}
fn buffer5() -> &'static mut [u8; 16] {
static mut BUFFER: [u8; 16] = [0u8; 16];
unsafe { &mut BUFFER }
}
fn eq(slice1: &[u8; 16], slice2: &[u8; 16]) -> bool {
slice1.iter().zip(slice2.iter()).all(|(a, b)| a == b)
}

View File

@ -7,7 +7,7 @@
#![no_std]
#![no_main]
use core::cell::RefCell;
use core::{cell::RefCell, ptr::addr_of_mut};
use critical_section::Mutex;
use esp_backtrace as _;
@ -39,8 +39,8 @@ fn main() -> ! {
static mut _stack_end: u32;
}
let stack_top = unsafe { &mut _stack_start } as *mut _ as u32;
let stack_bottom = unsafe { &mut _stack_end } as *mut _ as u32;
let stack_top = unsafe { addr_of_mut!(_stack_start) } as *mut _ as u32;
let stack_bottom = unsafe { addr_of_mut!(_stack_end) } as *mut _ as u32;
let size = 4096;
} else {

View File

@ -26,7 +26,7 @@ use embassy_time::{Duration, Timer};
use esp_backtrace as _;
use esp_hal::{
clock::ClockControl,
dma::{DmaPriority, *},
dma::*,
dma_descriptors,
embassy::{self},
peripherals::Peripherals,

View File

@ -42,7 +42,7 @@ fn main() -> ! {
#[cfg(not(any(feature = "esp32", feature = "esp32s2")))]
let dma_channel = dma.channel0;
let (_, mut tx_descriptors, rx_buffer, mut rx_descriptors) = dma_buffers!(0, 4 * 4092);
let (_, mut tx_descriptors, mut rx_buffer, mut rx_descriptors) = dma_buffers!(0, 4 * 4092);
// Here we test that the type is
// 1) reasonably simple (or at least this will flag changes that may make it
@ -67,16 +67,14 @@ fn main() -> ! {
i2s.with_mclk(io.pins.gpio0);
}
let i2s_rx = i2s
let mut i2s_rx = i2s
.i2s_rx
.with_bclk(io.pins.gpio2)
.with_ws(io.pins.gpio4)
.with_din(io.pins.gpio5)
.build();
let buffer = rx_buffer;
let mut transfer = i2s_rx.read_dma_circular(buffer).unwrap();
let mut transfer = i2s_rx.read_dma_circular(&mut rx_buffer).unwrap();
println!("Started transfer");
loop {

View File

@ -80,7 +80,7 @@ fn main() -> ! {
&clocks,
);
let i2s_tx = i2s
let mut i2s_tx = i2s
.i2s_tx
.with_bclk(io.pins.gpio2)
.with_ws(io.pins.gpio4)
@ -90,10 +90,9 @@ fn main() -> ! {
let data =
unsafe { core::slice::from_raw_parts(&SINE as *const _ as *const u8, SINE.len() * 2) };
let buffer = tx_buffer;
let mut idx = 0;
for i in 0..usize::min(data.len(), buffer.len()) {
buffer[i] = data[idx];
for i in 0..usize::min(data.len(), tx_buffer.len()) {
tx_buffer[i] = data[idx];
idx += 1;
@ -103,7 +102,7 @@ fn main() -> ! {
}
let mut filler = [0u8; 10000];
let mut transfer = i2s_tx.write_dma_circular(buffer).unwrap();
let mut transfer = i2s_tx.write_dma_circular(&tx_buffer).unwrap();
loop {
let avail = transfer.available();

View File

@ -213,7 +213,7 @@ fn main() -> ! {
let total_pixels = width as usize * height as usize;
let total_bytes = total_pixels * 2;
let mut buffer = tx_buffer;
let buffer = tx_buffer;
for color in [RED, BLUE].iter().cycle() {
let color = color.to_be_bytes();
@ -223,20 +223,20 @@ fn main() -> ! {
let mut bytes_left_to_write = total_bytes;
let transfer = i8080.send_dma(0x2C, 0, buffer).unwrap();
(buffer, i8080) = transfer.wait().unwrap();
let transfer = i8080.send_dma(0x2C, 0, &buffer).unwrap();
transfer.wait().unwrap();
bytes_left_to_write -= buffer.len();
while bytes_left_to_write >= buffer.len() {
let transfer = i8080.send_dma(0x3C, 0, buffer).unwrap();
(buffer, i8080) = transfer.wait().unwrap();
let transfer = i8080.send_dma(0x3C, 0, &buffer).unwrap();
transfer.wait().unwrap();
bytes_left_to_write -= buffer.len();
}
if bytes_left_to_write > 0 {
let transfer = i8080.send_dma(0x3C, 0, buffer).unwrap();
(buffer, i8080) = transfer.wait().unwrap();
let transfer = i8080.send_dma(0x3C, 0, &buffer).unwrap();
transfer.wait().unwrap();
}
delay.delay_ms(1_000u32);

View File

@ -60,11 +60,8 @@ fn main() -> ! {
let mut delay = Delay::new(&clocks);
loop {
let transfer = parl_io_rx.read_dma(buffer).unwrap();
// the buffer and driver is moved into the transfer and we can get it back via
// `wait`
(buffer, parl_io_rx) = transfer.wait().unwrap();
let transfer = parl_io_rx.read_dma(&mut buffer).unwrap();
transfer.wait().unwrap();
println!("Received: {:02x?} ...", &buffer[..30]);
delay.delay_ms(500u32);

View File

@ -75,7 +75,7 @@ fn main() -> ! {
)
.unwrap();
let mut buffer = tx_buffer;
let buffer = tx_buffer;
for i in 0..buffer.len() {
buffer[i] = (i % 255) as u8;
}
@ -83,11 +83,8 @@ fn main() -> ! {
let mut delay = Delay::new(&clocks);
loop {
let transfer = parl_io_tx.write_dma(buffer).unwrap();
// the buffer and driver is moved into the transfer and we can get it back via
// `wait`
(buffer, parl_io_tx) = transfer.wait().unwrap();
let transfer = parl_io_tx.write_dma(&buffer).unwrap();
transfer.wait().unwrap();
println!("Transferred {} bytes", buffer.len());
delay.delay_ms(500u32);

View File

@ -96,7 +96,7 @@ fn main() -> ! {
let mut delay = Delay::new(&clocks);
// DMA buffer require a static life-time
let mut zero_buf = zero_buffer();
let (zero_buf, _, _, _) = dma_buffers!(0);
let send = tx_buffer;
let mut receive = rx_buffer;
@ -107,10 +107,10 @@ fn main() -> ! {
Command::Command8(0x06, SpiDataMode::Single),
Address::None,
0,
zero_buf,
&zero_buf,
)
.unwrap();
(zero_buf, spi) = transfer.wait().unwrap();
transfer.wait().unwrap();
delay.delay_ms(250u32);
// erase sector
@ -120,10 +120,10 @@ fn main() -> ! {
Command::Command8(0x20, SpiDataMode::Single),
Address::Address24(0x000000, SpiDataMode::Single),
0,
zero_buf,
&zero_buf,
)
.unwrap();
(zero_buf, spi) = transfer.wait().unwrap();
transfer.wait().unwrap();
delay.delay_ms(250u32);
// write enable
@ -133,10 +133,10 @@ fn main() -> ! {
Command::Command8(0x06, SpiDataMode::Single),
Address::None,
0,
zero_buf,
&zero_buf,
)
.unwrap();
(_, spi) = transfer.wait().unwrap();
transfer.wait().unwrap();
delay.delay_ms(250u32);
// write data / program page
@ -148,10 +148,10 @@ fn main() -> ! {
Command::Command8(0x32, SpiDataMode::Single),
Address::Address24(0x000000, SpiDataMode::Single),
0,
send,
&send,
)
.unwrap();
(_, spi) = transfer.wait().unwrap();
transfer.wait().unwrap();
delay.delay_ms(250u32);
loop {
@ -162,14 +162,14 @@ fn main() -> ! {
Command::Command8(0xeb, SpiDataMode::Single),
Address::Address32(0x000000 << 8, SpiDataMode::Quad),
4,
receive,
&mut receive,
)
.unwrap();
// here we could do something else while DMA transfer is in progress
// the buffers and spi is moved into the transfer and we can get it back via
// `wait`
(receive, spi) = transfer.wait().unwrap();
transfer.wait().unwrap();
println!("{:x?}", &receive);
for b in &mut receive.iter() {
@ -184,8 +184,3 @@ fn main() -> ! {
delay.delay_ms(250u32);
}
}
fn zero_buffer() -> &'static mut [u8; 0] {
static mut BUFFER: [u8; 0] = [0u8; 0];
unsafe { &mut BUFFER }
}

View File

@ -80,7 +80,7 @@ fn main() -> ! {
send[send.len() - 1] = i;
i = i.wrapping_add(1);
let transfer = spi.dma_transfer(send, receive).unwrap();
let transfer = spi.dma_transfer(&mut send, &mut receive).unwrap();
// here we could do something else while DMA transfer is in progress
let mut n = 0;
// Check is_done until the transfer is almost done (32000 bytes at 100kHz is
@ -89,9 +89,8 @@ fn main() -> ! {
delay.delay_ms(250u32);
n += 1;
}
// the buffers and spi is moved into the transfer and we can get it back via
// `wait`
(receive, send, spi) = transfer.wait().unwrap();
transfer.wait().unwrap();
println!(
"{:x?} .. {:x?}",
&receive[..10],

View File

@ -107,7 +107,9 @@ fn main() -> ! {
slave_receive.fill(0xff);
i = i.wrapping_add(1);
let transfer = spi.dma_transfer(slave_send, slave_receive).unwrap();
let transfer = spi
.dma_transfer(&mut slave_send, &mut slave_receive)
.unwrap();
// Bit-bang out the contents of master_send and read into master_receive
// as quickly as manageable. MSB first. Mode 0, so sampled on the rising
// edge and set on the falling edge.
@ -138,7 +140,7 @@ fn main() -> ! {
master_sclk.set_low().unwrap();
// the buffers and spi is moved into the transfer and we can get it back via
// `wait`
(slave_receive, slave_send, spi) = transfer.wait().unwrap();
transfer.wait().unwrap();
println!(
"slave got {:x?} .. {:x?}, master got {:x?} .. {:x?}",
&slave_receive[..10],
@ -150,7 +152,7 @@ fn main() -> ! {
delay.delay_ms(250u32);
slave_receive.fill(0xff);
let transfer = spi.dma_read(slave_receive).unwrap();
let transfer = spi.dma_read(&mut slave_receive).unwrap();
master_cs.set_high().unwrap();
master_cs.set_low().unwrap();
@ -168,7 +170,7 @@ fn main() -> ! {
}
}
master_cs.set_high().unwrap();
(slave_receive, spi) = transfer.wait().unwrap();
transfer.wait().unwrap();
println!(
"slave got {:x?} .. {:x?}",
&slave_receive[..10],
@ -176,7 +178,7 @@ fn main() -> ! {
);
delay.delay_ms(250u32);
let transfer = spi.dma_write(slave_send).unwrap();
let transfer = spi.dma_write(&mut slave_send).unwrap();
master_receive.fill(0);
@ -194,7 +196,7 @@ fn main() -> ! {
master_receive[j] = rb;
}
master_cs.set_high().unwrap();
(slave_send, spi) = transfer.wait().unwrap();
transfer.wait().unwrap();
println!(
"master got {:x?} .. {:x?}",